diff --git a/.gitmodules b/.gitmodules
index bf3c35f42..f611a0acc 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -76,6 +76,9 @@
 [submodule "Telegram/lib_webview"]
 	path = Telegram/lib_webview
 	url = https://github.com/desktop-app/lib_webview.git
+[submodule "Telegram/ThirdParty/jemalloc"]
+	path = Telegram/ThirdParty/jemalloc
+	url = https://github.com/jemalloc/jemalloc
 [submodule "Telegram/ThirdParty/dispatch"]
 	path = Telegram/ThirdParty/dispatch
 	url = https://github.com/apple/swift-corelibs-libdispatch
diff --git a/Telegram/ThirdParty/jemalloc b/Telegram/ThirdParty/jemalloc
new file mode 160000
index 000000000..54eaed1d8
--- /dev/null
+++ b/Telegram/ThirdParty/jemalloc
@@ -0,0 +1 @@
+Subproject commit 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
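
Note for anyone updating an existing checkout: the new gitlink has to be initialized before building, e.g. with `git submodule update --init --recursive`, or with `git submodule update --init Telegram/ThirdParty/jemalloc` to fetch only the new dependency.
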
diff --git a/Telegram/ThirdParty/scudo/CMakeLists.txt b/Telegram/ThirdParty/scudo/CMakeLists.txt
deleted file mode 100644
index 60092005c..000000000
--- a/Telegram/ThirdParty/scudo/CMakeLists.txt
+++ /dev/null
@@ -1,253 +0,0 @@
-add_compiler_rt_component(scudo_standalone)
-
-include_directories(../.. include)
-
-set(SCUDO_CFLAGS)
-
-list(APPEND SCUDO_CFLAGS
-  -Werror=conversion
-  -Wall
-  -Wextra
-  -pedantic
-  -g
-  -nostdinc++)
-
-# Remove -stdlib= which is unused when passing -nostdinc++.
-string(REGEX REPLACE "-stdlib=[a-zA-Z+]*" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
-
-append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SCUDO_CFLAGS)
-
-append_list_if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG -fno-exceptions SCUDO_CFLAGS)
-
-append_list_if(COMPILER_RT_HAS_WNO_PEDANTIC -Wno-pedantic SCUDO_CFLAGS)
-
-# FIXME: find a cleaner way to agree with GWPAsan flags
-append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SCUDO_CFLAGS)
-
-if(COMPILER_RT_DEBUG)
-  list(APPEND SCUDO_CFLAGS -O0 -DSCUDO_DEBUG=1 -DSCUDO_ENABLE_HOOKS=1)
-else()
-  list(APPEND SCUDO_CFLAGS -O3)
-endif()
-
-append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
-  SCUDO_CFLAGS)
-
-set(SCUDO_LINK_FLAGS)
-
-list(APPEND SCUDO_LINK_FLAGS -Wl,-z,defs,-z,now,-z,relro)
-
-list(APPEND SCUDO_LINK_FLAGS -ffunction-sections -fdata-sections -Wl,--gc-sections)
-
-# We don't use the C++ standard library, so avoid including it by mistake.
-append_list_if(COMPILER_RT_HAS_NOSTDLIBXX_FLAG -nostdlib++ SCUDO_LINK_FLAGS)
-append_list_if(CXX_SUPPORTS_UNWINDLIB_NONE_FLAG --unwindlib=none SCUDO_LINK_FLAGS)
-
-if(COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH)
-  list(APPEND SCUDO_CFLAGS "--sysroot=${COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH}")
-endif()
-
-if(ANDROID)
-  list(APPEND SCUDO_CFLAGS -fno-emulated-tls)
-
-# Put the shared library in the global group. For more details, see
-# android-changes-for-ndk-developers.md#changes-to-library-search-order
-  append_list_if(COMPILER_RT_HAS_Z_GLOBAL -Wl,-z,global SCUDO_LINK_FLAGS)
-endif()
-
-set(SCUDO_HEADERS
-  allocator_common.h
-  allocator_config.h
-  atomic_helpers.h
-  bytemap.h
-  checksum.h
-  chunk.h
-  condition_variable.h
-  condition_variable_base.h
-  condition_variable_linux.h
-  combined.h
-  common.h
-  flags_parser.h
-  flags.h
-  fuchsia.h
-  internal_defs.h
-  linux.h
-  list.h
-  local_cache.h
-  memtag.h
-  mem_map.h
-  mem_map_base.h
-  mem_map_fuchsia.h
-  mem_map_linux.h
-  mutex.h
-  options.h
-  platform.h
-  primary32.h
-  primary64.h
-  quarantine.h
-  release.h
-  report.h
-  report_linux.h
-  secondary.h
-  size_class_map.h
-  stack_depot.h
-  stats.h
-  string_utils.h
-  timing.h
-  tsd_exclusive.h
-  tsd_shared.h
-  tsd.h
-  vector.h
-  wrappers_c_checks.h
-  wrappers_c.h
-
-  include/scudo/interface.h
-  )
-
-set(SCUDO_SOURCES
-  checksum.cpp
-  common.cpp
-  condition_variable_linux.cpp
-  crc32_hw.cpp
-  flags_parser.cpp
-  flags.cpp
-  fuchsia.cpp
-  linux.cpp
-  mem_map.cpp
-  mem_map_fuchsia.cpp
-  mem_map_linux.cpp
-  release.cpp
-  report.cpp
-  report_linux.cpp
-  string_utils.cpp
-  timing.cpp
-  )
-
-# Temporary hack until LLVM libc supports inttypes.h print format macros
-# See: https://github.com/llvm/llvm-project/issues/63317#issuecomment-1591906241
-if(LLVM_LIBC_INCLUDE_SCUDO)
-  list(REMOVE_ITEM SCUDO_HEADERS timing.h)
-  list(REMOVE_ITEM SCUDO_SOURCES timing.cpp)
-endif()
-
-# Enable the necessary instruction set for crc32_hw.cpp, if available.
-# Newer compiler versions use -mcrc32 rather than -msse4.2.
-if (COMPILER_RT_HAS_MCRC32_FLAG)
-  set_source_files_properties(crc32_hw.cpp PROPERTIES COMPILE_FLAGS -mcrc32)
-elseif (COMPILER_RT_HAS_MSSE4_2_FLAG)
-  set_source_files_properties(crc32_hw.cpp PROPERTIES COMPILE_FLAGS -msse4.2)
-endif()
-
-# Enable the AArch64 CRC32 feature for crc32_hw.cpp, if available.
-# Note that it is enabled by default starting with armv8.1-a.
-if (COMPILER_RT_HAS_MCRC_FLAG)
-  set_source_files_properties(crc32_hw.cpp PROPERTIES COMPILE_FLAGS -mcrc)
-endif()
-
-set(SCUDO_SOURCES_C_WRAPPERS
-  wrappers_c.cpp
-  )
-
-set(SCUDO_SOURCES_CXX_WRAPPERS
-  wrappers_cpp.cpp
-  )
-
-set(SCUDO_OBJECT_LIBS)
-set(SCUDO_LINK_LIBS)
-
-if (COMPILER_RT_HAS_GWP_ASAN)
-  if(COMPILER_RT_USE_LLVM_UNWINDER)
-    list(APPEND SCUDO_LINK_LIBS ${COMPILER_RT_UNWINDER_LINK_LIBS} dl)
-  elseif (COMPILER_RT_HAS_GCC_S_LIB)
-    list(APPEND SCUDO_LINK_LIBS gcc_s)
-  elseif (COMPILER_RT_HAS_GCC_LIB)
-    list(APPEND SCUDO_LINK_LIBS gcc)
-  elseif (NOT COMPILER_RT_USE_BUILTINS_LIBRARY)
-    message(FATAL_ERROR "No suitable unwinder library")
-  endif()
-
-  add_dependencies(scudo_standalone gwp_asan)
-  list(APPEND SCUDO_OBJECT_LIBS
-       RTGwpAsan RTGwpAsanBacktraceLibc RTGwpAsanSegvHandler
-       RTGwpAsanOptionsParser)
-
-  append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fno-omit-frame-pointer
-                                                         -mno-omit-leaf-frame-pointer
-                 SCUDO_CFLAGS)
-  list(APPEND SCUDO_CFLAGS -DGWP_ASAN_HOOKS)
-
-endif()
-
-if(COMPILER_RT_BUILD_SCUDO_STANDALONE_WITH_LLVM_LIBC)
-  include_directories(${COMPILER_RT_BINARY_DIR}/../libc/include/)
-
-  set(SCUDO_DEPS libc-headers)
-
-  list(APPEND SCUDO_CFLAGS "-ffreestanding")
-endif()
-
-append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread SCUDO_LINK_FLAGS)
-
-append_list_if(FUCHSIA zircon SCUDO_LINK_LIBS)
-
-if(COMPILER_RT_DEFAULT_TARGET_ARCH MATCHES "mips|mips64|mipsel|mips64el")
-  list(APPEND SCUDO_LINK_LIBS atomic)
-endif()
-
-if(COMPILER_RT_HAS_SCUDO_STANDALONE)
-  add_compiler_rt_object_libraries(RTScudoStandalone
-    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-    SOURCES ${SCUDO_SOURCES}
-    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
-    CFLAGS ${SCUDO_CFLAGS}
-    DEPS ${SCUDO_DEPS})
-  add_compiler_rt_object_libraries(RTScudoStandaloneCWrappers
-    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-    SOURCES ${SCUDO_SOURCES_C_WRAPPERS}
-    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
-    CFLAGS ${SCUDO_CFLAGS}
-    DEPS ${SCUDO_DEPS})
-  add_compiler_rt_object_libraries(RTScudoStandaloneCxxWrappers
-    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
-    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
-    CFLAGS ${SCUDO_CFLAGS}
-    DEPS ${SCUDO_DEPS})
-
-  add_compiler_rt_runtime(clang_rt.scudo_standalone
-    STATIC
-    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-    SOURCES ${SCUDO_SOURCES} ${SCUDO_SOURCES_C_WRAPPERS}
-    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
-    CFLAGS ${SCUDO_CFLAGS}
-    DEPS ${SCUDO_DEPS}
-    OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
-    PARENT_TARGET scudo_standalone)
-  add_compiler_rt_runtime(clang_rt.scudo_standalone_cxx
-    STATIC
-    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
-    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
-    CFLAGS ${SCUDO_CFLAGS}
-    DEPS ${SCUDO_DEPS}
-    PARENT_TARGET scudo_standalone)
-
-  if(COMPILER_RT_SCUDO_STANDALONE_BUILD_SHARED)
-    add_compiler_rt_runtime(clang_rt.scudo_standalone
-      SHARED
-      ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-      SOURCES ${SCUDO_SOURCES} ${SCUDO_SOURCES_C_WRAPPERS} ${SCUDO_SOURCES_CXX_WRAPPERS}
-      ADDITIONAL_HEADERS ${SCUDO_HEADERS}
-      CFLAGS ${SCUDO_CFLAGS}
-      DEPS ${SCUDO_DEPS}
-      OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
-      LINK_FLAGS ${SCUDO_LINK_FLAGS}
-      LINK_LIBS ${SCUDO_LINK_LIBS}
-      PARENT_TARGET scudo_standalone)
-  endif()
-
-  add_subdirectory(benchmarks)
-  if(COMPILER_RT_INCLUDE_TESTS)
-    add_subdirectory(tests)
-  endif()
-endif()
diff --git a/Telegram/ThirdParty/scudo/allocator_common.h b/Telegram/ThirdParty/scudo/allocator_common.h
deleted file mode 100644
index 95f4776ac..000000000
--- a/Telegram/ThirdParty/scudo/allocator_common.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//===-- allocator_common.h --------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ALLOCATOR_COMMON_H_
-#define SCUDO_ALLOCATOR_COMMON_H_
-
-#include "common.h"
-#include "list.h"
-
-namespace scudo {
-
-template <class SizeClassAllocator> struct TransferBatch {
-  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
-  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
-
-  static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
-  void setFromArray(CompactPtrT *Array, u16 N) {
-    DCHECK_LE(N, MaxNumCached);
-    Count = N;
-    memcpy(Batch, Array, sizeof(Batch[0]) * Count);
-  }
-  void appendFromArray(CompactPtrT *Array, u16 N) {
-    DCHECK_LE(N, MaxNumCached - Count);
-    memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
-    // u16 will be promoted to int by arithmetic type conversion.
-    Count = static_cast<u16>(Count + N);
-  }
-  void appendFromTransferBatch(TransferBatch *B, u16 N) {
-    DCHECK_LE(N, MaxNumCached - Count);
-    DCHECK_GE(B->Count, N);
-    // Append from the back of `B`.
-    memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
-    // u16 will be promoted to int by arithmetic type conversion.
-    Count = static_cast<u16>(Count + N);
-    B->Count = static_cast<u16>(B->Count - N);
-  }
-  void clear() { Count = 0; }
-  void add(CompactPtrT P) {
-    DCHECK_LT(Count, MaxNumCached);
-    Batch[Count++] = P;
-  }
-  void moveToArray(CompactPtrT *Array) {
-    memcpy(Array, Batch, sizeof(Batch[0]) * Count);
-    clear();
-  }
-  u16 getCount() const { return Count; }
-  bool isEmpty() const { return Count == 0U; }
-  CompactPtrT get(u16 I) const {
-    DCHECK_LE(I, Count);
-    return Batch[I];
-  }
-  TransferBatch *Next;
-
-private:
-  CompactPtrT Batch[MaxNumCached];
-  u16 Count;
-};
-
-// A BatchGroup is used to collect blocks. Each group has a group id that
-// identifies which blocks belong to it.
-template <class SizeClassAllocator> struct BatchGroup {
-  // `Next` is used by IntrusiveList.
-  BatchGroup *Next;
-  // The compact base address of each group
-  uptr CompactPtrGroupBase;
-  // Cached value of SizeClassAllocatorLocalCache::getMaxCached().
-  u16 MaxCachedPerBatch;
-  // Number of blocks pushed into this group. This is an increment-only
-  // counter.
-  uptr PushedBlocks;
-  // This is used to track how many bytes have not been in use since the
-  // last time we tried to release pages.
-  uptr BytesInBGAtLastCheckpoint;
-  // Blocks are managed by TransferBatch in a list.
-  SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
-};
-
-} // namespace scudo
-
-#endif // SCUDO_ALLOCATOR_COMMON_H_
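
For reviewers skimming the removal: allocator_common.h provided the TransferBatch and BatchGroup helpers that Scudo's primary allocator uses to move blocks between per-thread caches and the backend. The following is a minimal, standalone C++ sketch of the TransferBatch fill/drain pattern; the names and integer aliases are simplified stand-ins, not the Scudo types:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    using u16 = std::uint16_t;
    using CompactPtr = std::uint32_t; // Stand-in for SizeClassAllocator::CompactPtrT.

    // Fixed-capacity batch of compact pointers, filled from an array and
    // drained back into one, as in the removed TransferBatch.
    template <u16 MaxNumCached> struct TransferBatchSketch {
      void setFromArray(const CompactPtr *Array, u16 N) {
        Count = N; // The caller guarantees N <= MaxNumCached (a DCHECK in Scudo).
        std::memcpy(Batch, Array, sizeof(Batch[0]) * Count);
      }
      void moveToArray(CompactPtr *Array) {
        std::memcpy(Array, Batch, sizeof(Batch[0]) * Count);
        Count = 0;
      }
      u16 getCount() const { return Count; }

    private:
      CompactPtr Batch[MaxNumCached];
      u16 Count = 0;
    };

    int main() {
      const CompactPtr Blocks[3] = {0x10, 0x20, 0x30};
      TransferBatchSketch<8> B;
      B.setFromArray(Blocks, 3);
      CompactPtr Out[8];
      B.moveToArray(Out);
      std::printf("drained 3 blocks, first=0x%x\n", static_cast<unsigned>(Out[0]));
      return B.getCount(); // 0 after draining.
    }
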
diff --git a/Telegram/ThirdParty/scudo/allocator_config.h b/Telegram/ThirdParty/scudo/allocator_config.h
deleted file mode 100644
index 3c6aa3acb..000000000
--- a/Telegram/ThirdParty/scudo/allocator_config.h
+++ /dev/null
@@ -1,280 +0,0 @@
-//===-- allocator_config.h --------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ALLOCATOR_CONFIG_H_
-#define SCUDO_ALLOCATOR_CONFIG_H_
-
-#include "combined.h"
-#include "common.h"
-#include "condition_variable.h"
-#include "flags.h"
-#include "primary32.h"
-#include "primary64.h"
-#include "secondary.h"
-#include "size_class_map.h"
-#include "tsd_exclusive.h"
-#include "tsd_shared.h"
-
-// To import a custom configuration, define `SCUDO_USE_CUSTOM_CONFIG` and
-// alias the `Config` like:
-//
-// namespace scudo {
-//   // The instance of Scudo will be instantiated with `Config`.
-//   typedef CustomConfig Config;
-//   // Aliasing as default configuration to run the tests with this config.
-//   typedef CustomConfig DefaultConfig;
-// } // namespace scudo
-//
-// Put them in the header `custom_scudo_config.h`; the custom configuration
-// will then be used, and all the tests can be run with it as well.
-#ifdef SCUDO_USE_CUSTOM_CONFIG
-#include "custom_scudo_config.h"
-#endif
-
-namespace scudo {
-
-// The combined allocator uses a structure as a template argument that
-// specifies the configuration options for the various subcomponents of the
-// allocator.
-//
-// struct ExampleConfig {
-//   // Indicates possible support for Memory Tagging.
-//   static const bool MaySupportMemoryTagging = false;
-//
-//   // Thread-Specific Data Registry used, shared or exclusive.
-//   template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
-//
-//   struct Primary {
-//     // SizeClassMap to use with the Primary.
-//     using SizeClassMap = DefaultSizeClassMap;
-//
-//     // Log2 of the size of a size class region, as used by the Primary.
-//     static const uptr RegionSizeLog = 30U;
-//
-//     // Log2 of the block group size, as used by the Primary. Each group
-//     // covers a range of memory addresses; blocks in that range belong
-//     // to the same group. In general, a single region may use a 1 or 2 MB
-//     // group size, while multiple regions use a group size equal to the
-//     // region size, because the region size is usually smaller than 1 MB.
-//     // A smaller value gives finer-grained control of memory usage, but
-//     // the trade-off is that deallocation may take longer.
-//     static const uptr GroupSizeLog = 20U;
-//
-//     // Defines the type and scale of a compact pointer. A compact pointer can
-//     // be understood as the offset of a pointer within the region it belongs
-//     // to, in increments of a power-of-2 scale.
-//     // e.g.: Ptr = Base + (CompactPtr << Scale).
-//     typedef u32 CompactPtrT;
-//     static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-//
-//     // Indicates support for offsetting the start of a region by
-//     // a random number of pages. Only used with primary64.
-//     static const bool EnableRandomOffset = true;
-//
-//     // Call map for user memory with at least this size. Only used with
-//     // primary64.
-//     static const uptr MapSizeIncrement = 1UL << 18;
-//
-//     // Defines the minimal & maximal release interval that can be set.
-//     static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-//     static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-//
-//     // Use a condition variable to shorten the wait when the freelist is
-//     // being refilled. Note that this depends on each platform's condition
-//     // variable implementation, so the performance may vary and a benefit
-//     // is not guaranteed.
-//     // Note that both entries have to be defined to enable it.
-//     static const bool UseConditionVariable = true;
-//     using ConditionVariableT = ConditionVariableLinux;
-//   };
-//   // Defines the type of Primary allocator to use.
-//   template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-//
-//   // Defines the type of cache used by the Secondary. Some additional
-//   // configuration entries can be necessary depending on the Cache.
-//   struct Secondary {
-//     struct Cache {
-//       static const u32 EntriesArraySize = 32U;
-//       static const u32 QuarantineSize = 0U;
-//       static const u32 DefaultMaxEntriesCount = 32U;
-//       static const uptr DefaultMaxEntrySize = 1UL << 19;
-//       static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-//       static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-//     };
-//     // Defines the type of Secondary Cache to use.
-//     template <typename Config> using CacheT = MapAllocatorCache<Config>;
-//   };
-//   // Defines the type of Secondary allocator to use.
-//   template <typename Config> using SecondaryT = MapAllocator<Config>;
-// };
-
-#ifndef SCUDO_USE_CUSTOM_CONFIG
-
-// Default configurations for various platforms. Note this is only enabled when
-// there's no custom configuration in the build system.
-struct DefaultConfig {
-  static const bool MaySupportMemoryTagging = true;
-  template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
-
-  struct Primary {
-    using SizeClassMap = DefaultSizeClassMap;
-#if SCUDO_CAN_USE_PRIMARY64
-    static const uptr RegionSizeLog = 32U;
-    static const uptr GroupSizeLog = 21U;
-    typedef uptr CompactPtrT;
-    static const uptr CompactPtrScale = 0;
-    static const bool EnableRandomOffset = true;
-    static const uptr MapSizeIncrement = 1UL << 18;
-#else
-    static const uptr RegionSizeLog = 19U;
-    static const uptr GroupSizeLog = 19U;
-    typedef uptr CompactPtrT;
-#endif
-    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-  };
-#if SCUDO_CAN_USE_PRIMARY64
-  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-#else
-  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
-#endif
-
-  struct Secondary {
-    struct Cache {
-      static const u32 EntriesArraySize = 32U;
-      static const u32 QuarantineSize = 0U;
-      static const u32 DefaultMaxEntriesCount = 32U;
-      static const uptr DefaultMaxEntrySize = 1UL << 19;
-      static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-      static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    };
-    template <typename Config> using CacheT = MapAllocatorCache<Config>;
-  };
-
-  template <typename Config> using SecondaryT = MapAllocator<Config>;
-};
-
-#endif // SCUDO_USE_CUSTOM_CONFIG
-
-struct AndroidConfig {
-  static const bool MaySupportMemoryTagging = true;
-  template <class A>
-  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
-
-  struct Primary {
-    using SizeClassMap = AndroidSizeClassMap;
-#if SCUDO_CAN_USE_PRIMARY64
-    static const uptr RegionSizeLog = 28U;
-    typedef u32 CompactPtrT;
-    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-    static const uptr GroupSizeLog = 20U;
-    static const bool EnableRandomOffset = true;
-    static const uptr MapSizeIncrement = 1UL << 18;
-#else
-    static const uptr RegionSizeLog = 18U;
-    static const uptr GroupSizeLog = 18U;
-    typedef uptr CompactPtrT;
-#endif
-    static const s32 MinReleaseToOsIntervalMs = 1000;
-    static const s32 MaxReleaseToOsIntervalMs = 1000;
-  };
-#if SCUDO_CAN_USE_PRIMARY64
-  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-#else
-  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
-#endif
-
-  struct Secondary {
-    struct Cache {
-      static const u32 EntriesArraySize = 256U;
-      static const u32 QuarantineSize = 32U;
-      static const u32 DefaultMaxEntriesCount = 32U;
-      static const uptr DefaultMaxEntrySize = 2UL << 20;
-      static const s32 MinReleaseToOsIntervalMs = 0;
-      static const s32 MaxReleaseToOsIntervalMs = 1000;
-    };
-    template <typename Config> using CacheT = MapAllocatorCache<Config>;
-  };
-
-  template <typename Config> using SecondaryT = MapAllocator<Config>;
-};
-
-#if SCUDO_CAN_USE_PRIMARY64
-struct FuchsiaConfig {
-  static const bool MaySupportMemoryTagging = false;
-  template <class A>
-  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
-
-  struct Primary {
-    using SizeClassMap = FuchsiaSizeClassMap;
-#if SCUDO_RISCV64
-    // Support 39-bit VMA for riscv-64
-    static const uptr RegionSizeLog = 28U;
-    static const uptr GroupSizeLog = 19U;
-#else
-    static const uptr RegionSizeLog = 30U;
-    static const uptr GroupSizeLog = 21U;
-#endif
-    typedef u32 CompactPtrT;
-    static const bool EnableRandomOffset = true;
-    static const uptr MapSizeIncrement = 1UL << 18;
-    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-  };
-  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-
-  struct Secondary {
-    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
-  };
-  template <typename Config> using SecondaryT = MapAllocator<Config>;
-};
-
-struct TrustyConfig {
-  static const bool MaySupportMemoryTagging = true;
-  template <class A>
-  using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
-
-  struct Primary {
-    using SizeClassMap = TrustySizeClassMap;
-    static const uptr RegionSizeLog = 28U;
-    static const uptr GroupSizeLog = 20U;
-    typedef u32 CompactPtrT;
-    static const bool EnableRandomOffset = false;
-    static const uptr MapSizeIncrement = 1UL << 12;
-    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-  };
-  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-
-  struct Secondary {
-    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
-  };
-
-  template <typename Config> using SecondaryT = MapAllocator<Config>;
-};
-#endif
-
-#ifndef SCUDO_USE_CUSTOM_CONFIG
-
-#if SCUDO_ANDROID
-typedef AndroidConfig Config;
-#elif SCUDO_FUCHSIA
-typedef FuchsiaConfig Config;
-#elif SCUDO_TRUSTY
-typedef TrustyConfig Config;
-#else
-typedef DefaultConfig Config;
-#endif
-
-#endif // SCUDO_USE_CUSTOM_CONFIG
-
-} // namespace scudo
-
-#endif // SCUDO_ALLOCATOR_CONFIG_H_
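
The load-bearing idea in the removed allocator_config.h is compile-time configuration: a plain struct of static constants and template aliases selects the behavior of every subcomponent, and a typedef chain at the end picks the per-platform default. A compilable sketch of that selection pattern, with illustrative stand-in names (the log values echo the DefaultConfig/AndroidConfig structs above):

    #include <cstdint>
    #include <cstdio>

    struct DefaultConfigSketch {
      static const std::uintptr_t RegionSizeLog = 32U;
      static const std::uintptr_t GroupSizeLog = 21U;
    };

    struct AndroidConfigSketch {
      static const std::uintptr_t RegionSizeLog = 28U;
      static const std::uintptr_t GroupSizeLog = 20U;
    };

    // Mirrors the #if SCUDO_ANDROID / #else chain at the end of the file.
    #if defined(__ANDROID__)
    typedef AndroidConfigSketch Config;
    #else
    typedef DefaultConfigSketch Config;
    #endif

    int main() {
      std::printf("region: 2^%ju bytes, group: 2^%ju bytes\n",
                  static_cast<std::uintmax_t>(Config::RegionSizeLog),
                  static_cast<std::uintmax_t>(Config::GroupSizeLog));
    }
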
diff --git a/Telegram/ThirdParty/scudo/atomic_helpers.h b/Telegram/ThirdParty/scudo/atomic_helpers.h
deleted file mode 100644
index a68ffd162..000000000
--- a/Telegram/ThirdParty/scudo/atomic_helpers.h
+++ /dev/null
@@ -1,145 +0,0 @@
-//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ATOMIC_H_
-#define SCUDO_ATOMIC_H_
-
-#include "internal_defs.h"
-
-namespace scudo {
-
-enum memory_order {
-  memory_order_relaxed = 0,
-  memory_order_consume = 1,
-  memory_order_acquire = 2,
-  memory_order_release = 3,
-  memory_order_acq_rel = 4,
-  memory_order_seq_cst = 5
-};
-static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
-static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
-static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
-static_assert(memory_order_release == __ATOMIC_RELEASE, "");
-static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
-static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
-
-struct atomic_u8 {
-  typedef u8 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_u16 {
-  typedef u16 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_s32 {
-  typedef s32 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_u32 {
-  typedef u32 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_u64 {
-  typedef u64 Type;
-  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
-  alignas(8) volatile Type ValDoNotUse;
-};
-
-struct atomic_uptr {
-  typedef uptr Type;
-  volatile Type ValDoNotUse;
-};
-
-template <typename T>
-inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  typename T::Type V;
-  __atomic_load(&A->ValDoNotUse, &V, MO);
-  return V;
-}
-
-template <typename T>
-inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  __atomic_store(&A->ValDoNotUse, &V, MO);
-}
-
-inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
-
-template <typename T>
-inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
-                                         memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
-                                         memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
-                                         memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
-                                        memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
-                                        memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  typename T::Type R;
-  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
-  return R;
-}
-
-template <typename T>
-inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
-                                           typename T::Type Xchg,
-                                           memory_order MO) {
-  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
-                                   __ATOMIC_RELAXED);
-}
-
-// Clutter-reducing helpers.
-
-template <typename T>
-inline typename T::Type atomic_load_relaxed(const volatile T *A) {
-  return atomic_load(A, memory_order_relaxed);
-}
-
-template <typename T>
-inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
-  atomic_store(A, V, memory_order_relaxed);
-}
-
-template <typename T>
-inline typename T::Type
-atomic_compare_exchange_strong(volatile T *A, typename T::Type Cmp,
-                               typename T::Type Xchg, memory_order MO) {
-  atomic_compare_exchange_strong(A, &Cmp, Xchg, MO);
-  return Cmp;
-}
-
-} // namespace scudo
-
-#endif // SCUDO_ATOMIC_H_
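
The removed atomic_helpers.h deliberately avoids <atomic> and wraps the GCC/Clang __atomic builtins directly, pairing each plain struct with free functions that take an explicit memory order. A minimal sketch of the same pattern (simplified names; requires GCC or Clang for the builtins):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for scudo::atomic_u32: a trivially-copyable holder whose value
    // must only be touched through the atomic accessors.
    struct AtomicU32 {
      typedef std::uint32_t Type;
      volatile Type ValDoNotUse;
    };

    inline AtomicU32::Type atomicLoad(const volatile AtomicU32 *A, int MO) {
      AtomicU32::Type V;
      __atomic_load(&A->ValDoNotUse, &V, MO);
      return V;
    }

    inline void atomicStore(volatile AtomicU32 *A, AtomicU32::Type V, int MO) {
      __atomic_store(&A->ValDoNotUse, &V, MO);
    }

    int main() {
      AtomicU32 Counter = {0};
      atomicStore(&Counter, 42, __ATOMIC_RELEASE);
      std::printf("loaded: %u\n",
                  static_cast<unsigned>(atomicLoad(&Counter, __ATOMIC_ACQUIRE)));
    }
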
diff --git a/Telegram/ThirdParty/scudo/benchmarks/CMakeLists.txt b/Telegram/ThirdParty/scudo/benchmarks/CMakeLists.txt
deleted file mode 100644
index 26d023c79..000000000
--- a/Telegram/ThirdParty/scudo/benchmarks/CMakeLists.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-# To build these benchmarks, build the target "ScudoBenchmarks.$ARCH", where
-# $ARCH is the name of the target architecture. For example,
-# ScudoBenchmarks.x86_64 for 64-bit x86. The benchmark executable is then
-# available under projects/compiler-rt/lib/scudo/standalone/benchmarks/ in the
-# build directory.
-
-include(AddLLVM)
-
-set(SCUDO_BENCHMARK_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone)
-if(ANDROID)
-  list(APPEND SCUDO_BENCHMARK_CFLAGS -fno-emulated-tls)
-endif()
-string(REPLACE ";" " " SCUDO_BENCHMARK_CFLAGS " ${SCUDO_BENCHMARK_CFLAGS}")
-
-foreach(arch ${SCUDO_STANDALONE_SUPPORTED_ARCH})
-  add_benchmark(ScudoBenchmarks.${arch}
-                malloc_benchmark.cpp
-                $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
-  set_property(TARGET ScudoBenchmarks.${arch} APPEND_STRING PROPERTY
-               COMPILE_FLAGS "${SCUDO_BENCHMARK_CFLAGS}")
-
-  if (COMPILER_RT_HAS_GWP_ASAN)
-    add_benchmark(
-      ScudoBenchmarksWithGwpAsan.${arch} malloc_benchmark.cpp
-      $<TARGET_OBJECTS:RTScudoStandalone.${arch}>
-      $<TARGET_OBJECTS:RTGwpAsan.${arch}>
-      $<TARGET_OBJECTS:RTGwpAsanBacktraceLibc.${arch}>
-      $<TARGET_OBJECTS:RTGwpAsanSegvHandler.${arch}>)
-    set_property(
-      TARGET ScudoBenchmarksWithGwpAsan.${arch} APPEND_STRING PROPERTY
-      COMPILE_FLAGS "${SCUDO_BENCHMARK_CFLAGS} -DGWP_ASAN_HOOKS")
-  endif()
-endforeach()
diff --git a/Telegram/ThirdParty/scudo/benchmarks/malloc_benchmark.cpp b/Telegram/ThirdParty/scudo/benchmarks/malloc_benchmark.cpp
deleted file mode 100644
index 4fb05b761..000000000
--- a/Telegram/ThirdParty/scudo/benchmarks/malloc_benchmark.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-//===-- malloc_benchmark.cpp ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "allocator_config.h"
-#include "combined.h"
-#include "common.h"
-
-#include "benchmark/benchmark.h"
-
-#include <memory>
-#include <vector>
-
-void *CurrentAllocator;
-template <typename Config> void PostInitCallback() {
-  reinterpret_cast<scudo::Allocator<Config> *>(CurrentAllocator)->initGwpAsan();
-}
-
-template <typename Config> static void BM_malloc_free(benchmark::State &State) {
-  using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
-  auto Deleter = [](AllocatorT *A) {
-    A->unmapTestOnly();
-    delete A;
-  };
-  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
-                                                           Deleter);
-  CurrentAllocator = Allocator.get();
-
-  const size_t NBytes = State.range(0);
-  size_t PageSize = scudo::getPageSizeCached();
-
-  for (auto _ : State) {
-    void *Ptr = Allocator->allocate(NBytes, scudo::Chunk::Origin::Malloc);
-    auto *Data = reinterpret_cast<uint8_t *>(Ptr);
-    for (size_t I = 0; I < NBytes; I += PageSize)
-      Data[I] = 1;
-    benchmark::DoNotOptimize(Ptr);
-    Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
-  }
-
-  State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NBytes));
-}
-
-static const size_t MinSize = 8;
-static const size_t MaxSize = 128 * 1024;
-
-// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
-// cleanly.
-BENCHMARK_TEMPLATE(BM_malloc_free, scudo::AndroidConfig)
-    ->Range(MinSize, MaxSize);
-#if SCUDO_CAN_USE_PRIMARY64
-BENCHMARK_TEMPLATE(BM_malloc_free, scudo::FuchsiaConfig)
-    ->Range(MinSize, MaxSize);
-#endif
-
-template <typename Config>
-static void BM_malloc_free_loop(benchmark::State &State) {
-  using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
-  auto Deleter = [](AllocatorT *A) {
-    A->unmapTestOnly();
-    delete A;
-  };
-  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
-                                                           Deleter);
-  CurrentAllocator = Allocator.get();
-
-  const size_t NumIters = State.range(0);
-  size_t PageSize = scudo::getPageSizeCached();
-  std::vector<void *> Ptrs(NumIters);
-
-  for (auto _ : State) {
-    size_t SizeLog2 = 0;
-    for (void *&Ptr : Ptrs) {
-      Ptr = Allocator->allocate(1 << SizeLog2, scudo::Chunk::Origin::Malloc);
-      auto *Data = reinterpret_cast<uint8_t *>(Ptr);
-      for (size_t I = 0; I < 1 << SizeLog2; I += PageSize)
-        Data[I] = 1;
-      benchmark::DoNotOptimize(Ptr);
-      SizeLog2 = (SizeLog2 + 1) % 16;
-    }
-    for (void *&Ptr : Ptrs)
-      Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
-  }
-
-  State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NumIters) *
-                          8192);
-}
-
-static const size_t MinIters = 8;
-static const size_t MaxIters = 32 * 1024;
-
-// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
-// cleanly.
-BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::AndroidConfig)
-    ->Range(MinIters, MaxIters);
-#if SCUDO_CAN_USE_PRIMARY64
-BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::FuchsiaConfig)
-    ->Range(MinIters, MaxIters);
-#endif
-
-BENCHMARK_MAIN();
diff --git a/Telegram/ThirdParty/scudo/bytemap.h b/Telegram/ThirdParty/scudo/bytemap.h
deleted file mode 100644
index 248e096d0..000000000
--- a/Telegram/ThirdParty/scudo/bytemap.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_BYTEMAP_H_
-#define SCUDO_BYTEMAP_H_
-
-#include "atomic_helpers.h"
-#include "common.h"
-#include "mutex.h"
-
-namespace scudo {
-
-template <uptr Size> class FlatByteMap {
-public:
-  void init() { DCHECK(Size == 0 || Map[0] == 0); }
-
-  void unmapTestOnly() { memset(Map, 0, Size); }
-
-  void set(uptr Index, u8 Value) {
-    DCHECK_LT(Index, Size);
-    DCHECK_EQ(0U, Map[Index]);
-    Map[Index] = Value;
-  }
-  u8 operator[](uptr Index) {
-    DCHECK_LT(Index, Size);
-    return Map[Index];
-  }
-
-  void disable() {}
-  void enable() {}
-
-private:
-  u8 Map[Size] = {};
-};
-
-} // namespace scudo
-
-#endif // SCUDO_BYTEMAP_H_
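
FlatByteMap, removed above, is just a fixed-size, zero-initialized byte array with write-once semantics per slot (the DCHECK in set() requires the slot to still be zero). A stripped-down usage sketch without the checks:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    template <std::size_t Size> class FlatByteMapSketch {
    public:
      void set(std::size_t Index, std::uint8_t Value) { Map[Index] = Value; }
      std::uint8_t operator[](std::size_t Index) const { return Map[Index]; }

    private:
      std::uint8_t Map[Size] = {}; // Zero-initialized, as in Scudo.
    };

    int main() {
      FlatByteMapSketch<64> M;
      M.set(7, 3); // E.g. record that region 7 holds size class 3.
      std::printf("M[7] = %u\n", static_cast<unsigned>(M[7]));
    }
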
diff --git a/Telegram/ThirdParty/scudo/checksum.cpp b/Telegram/ThirdParty/scudo/checksum.cpp
deleted file mode 100644
index 2c277391a..000000000
--- a/Telegram/ThirdParty/scudo/checksum.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-//===-- checksum.cpp --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "checksum.h"
-#include "atomic_helpers.h"
-#include "chunk.h"
-
-#if defined(__x86_64__) || defined(__i386__)
-#include <cpuid.h>
-#elif defined(__arm__) || defined(__aarch64__)
-#if SCUDO_FUCHSIA
-#include <zircon/features.h>
-#include <zircon/syscalls.h>
-#else
-#include <sys/auxv.h>
-#endif
-#endif
-
-namespace scudo {
-
-Checksum HashAlgorithm = {Checksum::BSD};
-
-#if defined(__x86_64__) || defined(__i386__)
-// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
-// CRC32 requires the SSE 4.2 instruction set.
-#ifndef bit_SSE4_2
-#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
-#endif
-
-#ifndef signature_HYGON_ebx // They are not defined in gcc.
-// HYGON: "HygonGenuine".
-#define signature_HYGON_ebx 0x6f677948
-#define signature_HYGON_edx 0x6e65476e
-#define signature_HYGON_ecx 0x656e6975
-#endif
-
-bool hasHardwareCRC32() {
-  u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
-  __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
-  const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
-                       (Edx == signature_INTEL_edx) &&
-                       (Ecx == signature_INTEL_ecx);
-  const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
-                     (Ecx == signature_AMD_ecx);
-  const bool IsHygon = (Ebx == signature_HYGON_ebx) &&
-                       (Edx == signature_HYGON_edx) &&
-                       (Ecx == signature_HYGON_ecx);
-  if (!IsIntel && !IsAMD && !IsHygon)
-    return false;
-  __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
-  return !!(Ecx & bit_SSE4_2);
-}
-#elif defined(__arm__) || defined(__aarch64__)
-#ifndef AT_HWCAP
-#define AT_HWCAP 16
-#endif
-#ifndef HWCAP_CRC32
-#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
-#endif
-
-bool hasHardwareCRC32() {
-#if SCUDO_FUCHSIA
-  u32 HWCap;
-  const zx_status_t Status =
-      zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
-  if (Status != ZX_OK)
-    return false;
-  return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
-#else
-  return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
-#endif // SCUDO_FUCHSIA
-}
-#else
-// No hardware CRC32 implemented in Scudo for other architectures.
-bool hasHardwareCRC32() { return false; }
-#endif // defined(__x86_64__) || defined(__i386__)
-
-} // namespace scudo
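
On x86, the removed detection code reads CPUID leaf 0 to identify the vendor and leaf 1 to test the SSE4.2 bit that gates the crc32 instruction. A reduced, runnable sketch of just the feature-bit test (vendor check omitted; uses the same <cpuid.h> helpers as the original):

    #include <cstdio>

    #if defined(__x86_64__) || defined(__i386__)
    #include <cpuid.h>
    #ifndef bit_SSE4_2
    #define bit_SSE4_2 bit_SSE42 // Same compat shim as in the removed file.
    #endif

    static bool hasSSE42() {
      unsigned Eax, Ebx, Ecx, Edx;
      if (!__get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx))
        return false;
      return (Ecx & bit_SSE4_2) != 0; // SSE4.2 implies the crc32 instruction.
    }
    #else
    static bool hasSSE42() { return false; }
    #endif

    int main() { std::printf("SSE4.2 CRC32: %s\n", hasSSE42() ? "yes" : "no"); }
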
diff --git a/Telegram/ThirdParty/scudo/checksum.h b/Telegram/ThirdParty/scudo/checksum.h
deleted file mode 100644
index f8eda81fd..000000000
--- a/Telegram/ThirdParty/scudo/checksum.h
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- checksum.h ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CHECKSUM_H_
-#define SCUDO_CHECKSUM_H_
-
-#include "internal_defs.h"
-
-// Hardware CRC32 is supported at compilation via the following:
-// - for i386 & x86_64: -mcrc32 (earlier: -msse4.2)
-// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
-// An additional check must be performed at runtime as well to make sure the
-// emitted instructions are valid on the target host.
-
-#if defined(__CRC32__)
-// NB: clang has <crc32intrin.h> but GCC does not
-#include <smmintrin.h>
-#define CRC32_INTRINSIC                                                        \
-  FIRST_32_SECOND_64(__builtin_ia32_crc32si, __builtin_ia32_crc32di)
-#elif defined(__SSE4_2__)
-#include <smmintrin.h>
-#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
-#endif
-#ifdef __ARM_FEATURE_CRC32
-#include <arm_acle.h>
-#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
-#endif
-
-namespace scudo {
-
-enum class Checksum : u8 {
-  BSD = 0,
-  HardwareCRC32 = 1,
-};
-
-// BSD checksum, unlike a software CRC32, doesn't use any array lookup. We save
-// significantly on memory accesses, as well as the 1K CRC32 table, on platforms
-// that do not support hardware CRC32. The checksum itself is 16-bit, which is at
-// odds with CRC32, but enough for our needs.
-inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
-  for (u8 I = 0; I < sizeof(Data); I++) {
-    Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
-    Sum = static_cast<u16>(Sum + (Data & 0xff));
-    Data >>= 8;
-  }
-  return Sum;
-}
-
-bool hasHardwareCRC32();
-WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);
-
-} // namespace scudo
-
-#endif // SCUDO_CHECKSUM_H_
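
The fallback checksum above is small enough to reproduce verbatim. A standalone copy of computeBSDChecksum with a tiny driver, handy for checking the rotate-and-add behavior byte by byte:

    #include <cstdint>
    #include <cstdio>

    using u16 = std::uint16_t;
    using uptr = std::uintptr_t;

    // Same logic as the removed checksum.h: rotate the 16-bit sum right by
    // one, then add the next low byte of Data.
    inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
      for (unsigned I = 0; I < sizeof(Data); I++) {
        Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
        Sum = static_cast<u16>(Sum + (Data & 0xff));
        Data >>= 8;
      }
      return Sum;
    }

    int main() {
      const u16 Checksum = computeBSDChecksum(/*Seed=*/0x1234, /*Data=*/0xdeadbeef);
      std::printf("BSD checksum: 0x%04x\n", static_cast<unsigned>(Checksum));
    }
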
diff --git a/Telegram/ThirdParty/scudo/chunk.h b/Telegram/ThirdParty/scudo/chunk.h
deleted file mode 100644
index 9228df047..000000000
--- a/Telegram/ThirdParty/scudo/chunk.h
+++ /dev/null
@@ -1,143 +0,0 @@
-//===-- chunk.h -------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CHUNK_H_
-#define SCUDO_CHUNK_H_
-
-#include "platform.h"
-
-#include "atomic_helpers.h"
-#include "checksum.h"
-#include "common.h"
-#include "report.h"
-
-namespace scudo {
-
-extern Checksum HashAlgorithm;
-
-inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
-  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
-  // as opposed to only for crc32_hw.cpp. This means that other hardware
-  // specific instructions were likely emitted elsewhere as well, so there is
-  // no reason not to use it here.
-#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-  u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
-  for (uptr I = 0; I < ArraySize; I++)
-    Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
-  return static_cast<u16>(Crc ^ (Crc >> 16));
-#else
-  if (HashAlgorithm == Checksum::HardwareCRC32) {
-    u32 Crc = computeHardwareCRC32(Seed, Value);
-    for (uptr I = 0; I < ArraySize; I++)
-      Crc = computeHardwareCRC32(Crc, Array[I]);
-    return static_cast<u16>(Crc ^ (Crc >> 16));
-  } else {
-    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
-    for (uptr I = 0; I < ArraySize; I++)
-      Checksum = computeBSDChecksum(Checksum, Array[I]);
-    return Checksum;
-  }
-#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
-       // defined(__ARM_FEATURE_CRC32)
-}
-
-namespace Chunk {
-
-// Note that in an ideal world, `State` and `Origin` would be `enum class`, and
-// the associated `UnpackedHeader` fields would be of the respective enum class
-// type, but https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents that:
-// GCC errors out, complaining that the number of bits is not enough.
-enum Origin : u8 {
-  Malloc = 0,
-  New = 1,
-  NewArray = 2,
-  Memalign = 3,
-};
-
-enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
-
-typedef u64 PackedHeader;
-// Update the 'Mask' constants to reflect changes in this structure.
-struct UnpackedHeader {
-  uptr ClassId : 8;
-  u8 State : 2;
-  // Origin if State == Allocated, or WasZeroed otherwise.
-  u8 OriginOrWasZeroed : 2;
-  uptr SizeOrUnusedBytes : 20;
-  uptr Offset : 16;
-  uptr Checksum : 16;
-};
-typedef atomic_u64 AtomicPackedHeader;
-static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");
-
-// Those constants are required to silence some -Werror=conversion errors when
-// assigning values to the related bitfield variables.
-constexpr uptr ClassIdMask = (1UL << 8) - 1;
-constexpr u8 StateMask = (1U << 2) - 1;
-constexpr u8 OriginMask = (1U << 2) - 1;
-constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
-constexpr uptr OffsetMask = (1UL << 16) - 1;
-constexpr uptr ChecksumMask = (1UL << 16) - 1;
-
-constexpr uptr getHeaderSize() {
-  return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
-}
-
-inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
-  return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
-                                                getHeaderSize());
-}
-
-inline const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
-  return reinterpret_cast<const AtomicPackedHeader *>(
-      reinterpret_cast<uptr>(Ptr) - getHeaderSize());
-}
-
-// We do not need a cryptographically strong hash for the checksum, but a CRC
-// type function that can alert us in the event a header is invalid or
-// corrupted. Ideally slightly better than a simple xor of all fields.
-static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
-                                        UnpackedHeader *Header) {
-  UnpackedHeader ZeroChecksumHeader = *Header;
-  ZeroChecksumHeader.Checksum = 0;
-  uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
-  memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
-  return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
-                         ARRAY_SIZE(HeaderHolder));
-}
-
-inline void storeHeader(u32 Cookie, void *Ptr,
-                        UnpackedHeader *NewUnpackedHeader) {
-  NewUnpackedHeader->Checksum =
-      computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
-  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
-  atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
-}
-
-inline void loadHeader(u32 Cookie, const void *Ptr,
-                       UnpackedHeader *NewUnpackedHeader) {
-  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
-  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
-  if (UNLIKELY(NewUnpackedHeader->Checksum !=
-               computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
-    reportHeaderCorruption(const_cast<void *>(Ptr));
-}
-
-inline bool isValid(u32 Cookie, const void *Ptr,
-                    UnpackedHeader *NewUnpackedHeader) {
-  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
-  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
-  return NewUnpackedHeader->Checksum ==
-         computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
-}
-
-} // namespace Chunk
-
-} // namespace scudo
-
-#endif // SCUDO_CHUNK_H_
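
One detail of the removed chunk.h worth noting: the header bit-field widths (8 + 2 + 2 + 20 + 16 + 16) sum to exactly 64 bits, which is what lets storeHeader/loadHeader round-trip the whole header through a single relaxed atomic u64. A compilable sketch of that layout with simplified types (memcpy standing in for bit_cast):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct UnpackedHeaderSketch {
      std::uint64_t ClassId : 8;
      std::uint64_t State : 2;
      std::uint64_t OriginOrWasZeroed : 2;
      std::uint64_t SizeOrUnusedBytes : 20;
      std::uint64_t Offset : 16;
      std::uint64_t Checksum : 16;
    };
    static_assert(sizeof(UnpackedHeaderSketch) == sizeof(std::uint64_t),
                  "header must pack into one u64");

    int main() {
      UnpackedHeaderSketch H = {};
      H.ClassId = 3;
      H.State = 1; // Chunk::State::Allocated in the removed code.
      H.SizeOrUnusedBytes = 128;
      std::uint64_t Packed;
      std::memcpy(&Packed, &H, sizeof(Packed)); // bit_cast equivalent.
      std::printf("packed header: 0x%016llx\n",
                  static_cast<unsigned long long>(Packed));
    }
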
diff --git a/Telegram/ThirdParty/scudo/combined.h b/Telegram/ThirdParty/scudo/combined.h
deleted file mode 100644
index 4624f83d1..000000000
--- a/Telegram/ThirdParty/scudo/combined.h
+++ /dev/null
@@ -1,1538 +0,0 @@
-//===-- combined.h ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_COMBINED_H_
-#define SCUDO_COMBINED_H_
-
-#include "chunk.h"
-#include "common.h"
-#include "flags.h"
-#include "flags_parser.h"
-#include "local_cache.h"
-#include "mem_map.h"
-#include "memtag.h"
-#include "options.h"
-#include "quarantine.h"
-#include "report.h"
-#include "secondary.h"
-#include "stack_depot.h"
-#include "string_utils.h"
-#include "tsd.h"
-
-#include "scudo/interface.h"
-
-#ifdef GWP_ASAN_HOOKS
-#include "gwp_asan/guarded_pool_allocator.h"
-#include "gwp_asan/optional/backtrace.h"
-#include "gwp_asan/optional/segv_handler.h"
-#endif // GWP_ASAN_HOOKS
-
-extern "C" inline void EmptyCallback() {}
-
-#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
-// This function is not part of the NDK so it does not appear in any public
-// header files. We only declare/use it when targeting the platform.
-extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
-                                                     size_t num_entries);
-#endif
-
-namespace scudo {
-
-template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
-class Allocator {
-public:
-  using PrimaryT = typename Config::template PrimaryT<Config>;
-  using SecondaryT = typename Config::template SecondaryT<Config>;
-  using CacheT = typename PrimaryT::CacheT;
-  typedef Allocator<Config, PostInitCallback> ThisT;
-  typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
-
-  void callPostInitCallback() {
-    pthread_once(&PostInitNonce, PostInitCallback);
-  }
-
-  struct QuarantineCallback {
-    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
-        : Allocator(Instance), Cache(LocalCache) {}
-
-    // Chunk recycling function, returns a quarantined chunk to the backend,
-    // first making sure it hasn't been tampered with.
-    void recycle(void *Ptr) {
-      Chunk::UnpackedHeader Header;
-      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
-      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
-        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
-
-      Header.State = Chunk::State::Available;
-      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
-
-      if (allocatorSupportsMemoryTagging<Config>())
-        Ptr = untagPointer(Ptr);
-      void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
-      Cache.deallocate(Header.ClassId, BlockBegin);
-    }
-
-    // We take a shortcut when allocating a quarantine batch by working with the
-    // appropriate class ID instead of using Size. The compiler should optimize
-    // the class ID computation and work with the associated cache directly.
-    void *allocate(UNUSED uptr Size) {
-      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
-          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
-      void *Ptr = Cache.allocate(QuarantineClassId);
-      // Quarantine batch allocation failure is fatal.
-      if (UNLIKELY(!Ptr))
-        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
-
-      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
-                                     Chunk::getHeaderSize());
-      Chunk::UnpackedHeader Header = {};
-      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
-      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
-      Header.State = Chunk::State::Allocated;
-      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
-
-      // Reset tag to 0 as this chunk may have been previously used for a tagged
-      // user allocation.
-      if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
-        storeTags(reinterpret_cast<uptr>(Ptr),
-                  reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
-
-      return Ptr;
-    }
-
-    void deallocate(void *Ptr) {
-      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
-          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
-      Chunk::UnpackedHeader Header;
-      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
-
-      if (UNLIKELY(Header.State != Chunk::State::Allocated))
-        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
-      DCHECK_EQ(Header.ClassId, QuarantineClassId);
-      DCHECK_EQ(Header.Offset, 0);
-      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
-
-      Header.State = Chunk::State::Available;
-      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
-      Cache.deallocate(QuarantineClassId,
-                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
-                                                Chunk::getHeaderSize()));
-    }
-
-  private:
-    ThisT &Allocator;
-    CacheT &Cache;
-  };
-
-  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
-  typedef typename QuarantineT::CacheT QuarantineCacheT;
-
-  void init() {
-    performSanityChecks();
-
-    // Check if hardware CRC32 is supported in the binary and by the platform,
-    // if so, opt for the CRC32 hardware version of the checksum.
-    if (&computeHardwareCRC32 && hasHardwareCRC32())
-      HashAlgorithm = Checksum::HardwareCRC32;
-
-    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
-      Cookie = static_cast<u32>(getMonotonicTime() ^
-                                (reinterpret_cast<uptr>(this) >> 4));
-
-    initFlags();
-    reportUnrecognizedFlags();
-
-    // Store some flags locally.
-    if (getFlags()->may_return_null)
-      Primary.Options.set(OptionBit::MayReturnNull);
-    if (getFlags()->zero_contents)
-      Primary.Options.setFillContentsMode(ZeroFill);
-    else if (getFlags()->pattern_fill_contents)
-      Primary.Options.setFillContentsMode(PatternOrZeroFill);
-    if (getFlags()->dealloc_type_mismatch)
-      Primary.Options.set(OptionBit::DeallocTypeMismatch);
-    if (getFlags()->delete_size_mismatch)
-      Primary.Options.set(OptionBit::DeleteSizeMismatch);
-    if (allocatorSupportsMemoryTagging<Config>() &&
-        systemSupportsMemoryTagging())
-      Primary.Options.set(OptionBit::UseMemoryTagging);
-
-    QuarantineMaxChunkSize =
-        static_cast<u32>(getFlags()->quarantine_max_chunk_size);
-
-    Stats.init();
-    const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
-    Primary.init(ReleaseToOsIntervalMs);
-    Secondary.init(&Stats, ReleaseToOsIntervalMs);
-    Quarantine.init(
-        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
-        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
-
-    mapAndInitializeRingBuffer();
-  }
-
-  // Initialize the embedded GWP-ASan instance. Requires the main allocator to
-  // be functional, best called from PostInitCallback.
-  void initGwpAsan() {
-#ifdef GWP_ASAN_HOOKS
-    gwp_asan::options::Options Opt;
-    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
-    Opt.MaxSimultaneousAllocations =
-        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
-    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
-    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
-    Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
-    // Embedded GWP-ASan is locked through the Scudo atfork handler (via
-    // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
-    // handler.
-    Opt.InstallForkHandlers = false;
-    Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
-    GuardedAlloc.init(Opt);
-
-    if (Opt.InstallSignalHandlers)
-      gwp_asan::segv_handler::installSignalHandlers(
-          &GuardedAlloc, Printf,
-          gwp_asan::backtrace::getPrintBacktraceFunction(),
-          gwp_asan::backtrace::getSegvBacktraceFunction(),
-          Opt.Recoverable);
-
-    GuardedAllocSlotSize =
-        GuardedAlloc.getAllocatorState()->maximumAllocationSize();
-    Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
-                            GuardedAllocSlotSize);
-#endif // GWP_ASAN_HOOKS
-  }
-
-#ifdef GWP_ASAN_HOOKS
-  const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
-    return GuardedAlloc.getMetadataRegion();
-  }
-
-  const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
-    return GuardedAlloc.getAllocatorState();
-  }
-#endif // GWP_ASAN_HOOKS
-
-  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
-    TSDRegistry.initThreadMaybe(this, MinimalInit);
-  }
-
-  void unmapTestOnly() {
-    unmapRingBuffer();
-    TSDRegistry.unmapTestOnly(this);
-    Primary.unmapTestOnly();
-    Secondary.unmapTestOnly();
-#ifdef GWP_ASAN_HOOKS
-    if (getFlags()->GWP_ASAN_InstallSignalHandlers)
-      gwp_asan::segv_handler::uninstallSignalHandlers();
-    GuardedAlloc.uninitTestOnly();
-#endif // GWP_ASAN_HOOKS
-  }
-
-  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
-  QuarantineT *getQuarantine() { return &Quarantine; }
-
-  // The Cache must be provided zero-initialized.
-  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
-
-  // Release the resources used by a TSD, which involves:
-  // - draining the local quarantine cache to the global quarantine;
-  // - releasing the cached pointers back to the Primary;
-  // - unlinking the local stats from the global ones (destroying the cache does
-  //   the last two items).
-  void commitBack(TSD<ThisT> *TSD) {
-    TSD->assertLocked(/*BypassCheck=*/true);
-    Quarantine.drain(&TSD->getQuarantineCache(),
-                     QuarantineCallback(*this, TSD->getCache()));
-    TSD->getCache().destroy(&Stats);
-  }
-
-  void drainCache(TSD<ThisT> *TSD) {
-    TSD->assertLocked(/*BypassCheck=*/true);
-    Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
-                               QuarantineCallback(*this, TSD->getCache()));
-    TSD->getCache().drain();
-  }
-  void drainCaches() { TSDRegistry.drainCaches(this); }
-
-  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
-    if (!allocatorSupportsMemoryTagging<Config>())
-      return Ptr;
-    auto UntaggedPtr = untagPointer(Ptr);
-    if (UntaggedPtr != Ptr)
-      return UntaggedPtr;
-    // Secondary, or pointer allocated while memory tagging is unsupported or
-    // disabled. The tag mismatch is okay in the latter case because tags will
-    // not be checked.
-    return addHeaderTag(Ptr);
-  }
-
-  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
-    if (!allocatorSupportsMemoryTagging<Config>())
-      return Ptr;
-    return addFixedTag(Ptr, 2);
-  }
-
-  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
-    return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
-  }
-
-  NOINLINE u32 collectStackTrace() {
-#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
-    // Discard collectStackTrace() frame and allocator function frame.
-    constexpr uptr DiscardFrames = 2;
-    uptr Stack[MaxTraceSize + DiscardFrames];
-    uptr Size =
-        android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
-    Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
-    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
-#else
-    return 0;
-#endif
-  }
-
-  uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
-                                         uptr ClassId) {
-    if (!Options.get(OptionBit::UseOddEvenTags))
-      return 0;
-
-    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
-    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
-    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
-    // that bit will have the pattern 010101... for consecutive blocks, which we
-    // can use to determine which tag mask to use.
-    return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
-  }
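
The shift trick above is dense, so here is a minimal standalone sketch of the same computation (plain fixed-width types stand in for scudo's `uptr`, and `SizeLSB` stands in for `SizeClassMap::getSizeLSBByClassId(ClassId)`). The returned value is OR'd into the exclude mask for `setRandomTag()`, so adjacent blocks exclude complementary halves of the tag space:

```cpp
#include <cassert>
#include <cstdint>

// Same expression as computeOddEvenMaskForPointerMaybe(), in isolation.
uint64_t oddEvenMask(uint64_t Ptr, unsigned SizeLSB) {
  // Adding the block size to Ptr flips bit SizeLSB, so consecutive blocks
  // alternate between a shift of 0 and a shift of 1.
  return 0x5555u << ((Ptr >> SizeLSB) & 1);
}

int main() {
  const unsigned SizeLSB = 5; // blocks laid out 32 bytes apart
  const uint64_t A = 0x1000, B = A + 32;
  assert(oddEvenMask(A, SizeLSB) == 0x5555u); // excludes even tags -> odd tag
  assert(oddEvenMask(B, SizeLSB) == 0xAAAAu); // excludes odd tags -> even tag
  assert((oddEvenMask(A, SizeLSB) & oddEvenMask(B, SizeLSB)) == 0);
}
```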
-
-  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
-                          uptr Alignment = MinAlignment,
-                          bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
-    initThreadMaybe();
-
-    const Options Options = Primary.Options.load();
-    if (UNLIKELY(Alignment > MaxAlignment)) {
-      if (Options.get(OptionBit::MayReturnNull))
-        return nullptr;
-      reportAlignmentTooBig(Alignment, MaxAlignment);
-    }
-    if (Alignment < MinAlignment)
-      Alignment = MinAlignment;
-
-#ifdef GWP_ASAN_HOOKS
-    if (UNLIKELY(GuardedAlloc.shouldSample())) {
-      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
-        Stats.lock();
-        Stats.add(StatAllocated, GuardedAllocSlotSize);
-        Stats.sub(StatFree, GuardedAllocSlotSize);
-        Stats.unlock();
-        return Ptr;
-      }
-    }
-#endif // GWP_ASAN_HOOKS
-
-    const FillContentsMode FillContents = ZeroContents ? ZeroFill
-                                          : TSDRegistry.getDisableMemInit()
-                                              ? NoFill
-                                              : Options.getFillContentsMode();
-
-    // If the requested size happens to be 0 (more common than you might think),
-    // allocate MinAlignment bytes on top of the header. Then add the extra
-    // bytes required to fulfill the alignment requirements: we allocate enough
-    // to be sure that there will be an address in the block that will satisfy
-    // the alignment.
-    const uptr NeededSize =
-        roundUp(Size, MinAlignment) +
-        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
-
-    // Takes care of extravagantly large sizes as well as integer overflows.
-    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
-    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
-      if (Options.get(OptionBit::MayReturnNull))
-        return nullptr;
-      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
-    }
-    DCHECK_LE(Size, NeededSize);
-
-    void *Block = nullptr;
-    uptr ClassId = 0;
-    uptr SecondaryBlockEnd = 0;
-    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
-      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
-      DCHECK_NE(ClassId, 0U);
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-      Block = TSD->getCache().allocate(ClassId);
-      // If the allocation failed, retry in each successively larger class until
-      // it fits. If it fails to fit in the largest class, fall back to the
-      // Secondary.
-      if (UNLIKELY(!Block)) {
-        while (ClassId < SizeClassMap::LargestClassId && !Block)
-          Block = TSD->getCache().allocate(++ClassId);
-        if (!Block)
-          ClassId = 0;
-      }
-      if (UnlockRequired)
-        TSD->unlock();
-    }
-    if (UNLIKELY(ClassId == 0)) {
-      Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
-                                 FillContents);
-    }
-
-    if (UNLIKELY(!Block)) {
-      if (Options.get(OptionBit::MayReturnNull))
-        return nullptr;
-      printStats();
-      reportOutOfMemory(NeededSize);
-    }
-
-    const uptr BlockUptr = reinterpret_cast<uptr>(Block);
-    const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
-    const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
-
-    void *Ptr = reinterpret_cast<void *>(UserPtr);
-    void *TaggedPtr = Ptr;
-    if (LIKELY(ClassId)) {
-      // We only need to zero or tag the contents for Primary backed
-      // allocations. We only set tags for primary allocations in order to avoid
-      // faulting potentially large numbers of pages for large secondary
-      // allocations. We assume that guard pages are enough to protect these
-      // allocations.
-      //
-      // FIXME: When the kernel provides a way to set the background tag of a
-      // mapping, we should be able to tag secondary allocations as well.
-      //
-      // When memory tagging is enabled, zeroing the contents is done as part of
-      // setting the tag.
-      if (UNLIKELY(useMemoryTagging<Config>(Options))) {
-        uptr PrevUserPtr;
-        Chunk::UnpackedHeader Header;
-        const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
-        const uptr BlockEnd = BlockUptr + BlockSize;
-        // If possible, try to reuse the UAF tag that was set by deallocate().
-        // For simplicity, only reuse tags if we have the same start address as
-        // the previous allocation. This handles the majority of cases since
-        // most allocations will not be more aligned than the minimum alignment.
-        //
-        // We need to handle situations involving reclaimed chunks, and retag
-        // the reclaimed portions if necessary. In the case where the chunk is
-        // fully reclaimed, the chunk's header will be zero, which will trigger
-        // the code path for new mappings and invalid chunks that prepares the
-        // chunk from scratch. There are three possibilities for partial
-        // reclaiming:
-        //
-        // (1) Header was reclaimed, data was partially reclaimed.
-        // (2) Header was not reclaimed, all data was reclaimed (e.g. because
-        //     data started on a page boundary).
-        // (3) Header was not reclaimed, data was partially reclaimed.
-        //
-        // Case (1) will be handled in the same way as for full reclaiming,
-        // since the header will be zero.
-        //
-        // We can detect case (2) by loading the tag from the start
-        // of the chunk. If it is zero, it means that either all data was
-        // reclaimed (since we never use zero as the chunk tag), or that the
-        // previous allocation was of size zero. Either way, we need to prepare
-        // a new chunk from scratch.
-        //
-        // We can detect case (3) by moving to the next page (if covered by the
-        // chunk) and loading the tag of its first granule. If it is zero, it
-        // means that all following pages may need to be retagged. On the other
-        // hand, if it is nonzero, we can assume that all following pages are
-        // still tagged, according to the logic that if any of the pages
-        // following the next page were reclaimed, the next page would have been
-        // reclaimed as well.
-        uptr TaggedUserPtr;
-        if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
-            PrevUserPtr == UserPtr &&
-            (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
-          uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
-          const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
-          if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
-            PrevEnd = NextPage;
-          TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
-          resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
-          if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
-            // If an allocation needs to be zeroed (i.e. calloc) we can normally
-            // avoid zeroing the memory now since we can rely on memory having
-            // been zeroed on free, as this is normally done while setting the
-            // UAF tag. But if tagging was disabled per-thread when the memory
-            // was freed, it would not have been retagged and thus zeroed, and
-            // therefore it needs to be zeroed now.
-            memset(TaggedPtr, 0,
-                   Min(Size, roundUp(PrevEnd - TaggedUserPtr,
-                                     archMemoryTagGranuleSize())));
-          } else if (Size) {
-            // Clear any stack metadata that may have previously been stored in
-            // the chunk data.
-            memset(TaggedPtr, 0, archMemoryTagGranuleSize());
-          }
-        } else {
-          const uptr OddEvenMask =
-              computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
-          TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
-        }
-        storePrimaryAllocationStackMaybe(Options, Ptr);
-      } else {
-        Block = addHeaderTag(Block);
-        Ptr = addHeaderTag(Ptr);
-        if (UNLIKELY(FillContents != NoFill)) {
-          // This condition is not necessarily unlikely, but since memset is
-          // costly, we might as well mark it as such.
-          memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
-                 PrimaryT::getSizeByClassId(ClassId));
-        }
-      }
-    } else {
-      Block = addHeaderTag(Block);
-      Ptr = addHeaderTag(Ptr);
-      if (UNLIKELY(useMemoryTagging<Config>(Options))) {
-        storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
-        storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
-      }
-    }
-
-    Chunk::UnpackedHeader Header = {};
-    if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
-      const uptr Offset = UserPtr - UnalignedUserPtr;
-      DCHECK_GE(Offset, 2 * sizeof(u32));
-      // The BlockMarker has no security purpose, but is specifically meant for
-      // the chunk iteration function that can be used in debugging situations.
-      // It is the only situation where we have to locate the start of a chunk
-      // based on its block address.
-      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
-      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
-      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
-    }
-    Header.ClassId = ClassId & Chunk::ClassIdMask;
-    Header.State = Chunk::State::Allocated;
-    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
-    Header.SizeOrUnusedBytes =
-        (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
-        Chunk::SizeOrUnusedBytesMask;
-    Chunk::storeHeader(Cookie, Ptr, &Header);
-
-    return TaggedPtr;
-  }
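
One step in allocate() that is easy to misread is the NeededSize computation: for default alignment it reserves header space on top of the rounded size, while for over-aligned requests it reserves a full Alignment's worth of slack instead, guaranteeing some address in the block satisfies the alignment. A worked example with stand-in constants (the real header is 8 bytes per the layout comment further down; MinAlignment is config-dependent):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uintptr_t;
  const uptr MinAlignment = 16, HeaderSize = 8; // stand-in values
  auto roundUp = [](uptr X, uptr B) { return (X + B - 1) & ~(B - 1); };

  // Default-aligned request: header bytes on top of the rounded size.
  uptr NeededSize = roundUp(/*Size=*/100, MinAlignment) + HeaderSize;
  assert(NeededSize == 120);

  // Over-aligned request (Alignment = 64): reserve Alignment bytes instead.
  NeededSize = roundUp(/*Size=*/100, MinAlignment) + /*Alignment=*/64;
  assert(NeededSize == 176);
}
```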
-
-  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
-                           UNUSED uptr Alignment = MinAlignment) {
-    if (UNLIKELY(!Ptr))
-      return;
-
-    // For a deallocation, we only ensure minimal initialization, meaning thread
-    // local data will be left uninitialized for now (when using ELF TLS). The
-    // fallback cache will be used instead. This is a workaround for a situation
-    // where the only heap operation performed in a thread would be a free past
-    // the TLS destructors, which would leave initialized thread-specific data
-    // never properly destroyed. Any other heap operation will do a full init.
-    initThreadMaybe(/*MinimalInit=*/true);
-
-#ifdef GWP_ASAN_HOOKS
-    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
-      GuardedAlloc.deallocate(Ptr);
-      Stats.lock();
-      Stats.add(StatFree, GuardedAllocSlotSize);
-      Stats.sub(StatAllocated, GuardedAllocSlotSize);
-      Stats.unlock();
-      return;
-    }
-#endif // GWP_ASAN_HOOKS
-
-    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
-      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
-
-    void *TaggedPtr = Ptr;
-    Ptr = getHeaderTaggedPointer(Ptr);
-
-    Chunk::UnpackedHeader Header;
-    Chunk::loadHeader(Cookie, Ptr, &Header);
-
-    if (UNLIKELY(Header.State != Chunk::State::Allocated))
-      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
-
-    const Options Options = Primary.Options.load();
-    if (Options.get(OptionBit::DeallocTypeMismatch)) {
-      if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
-        // With the exception of memalign'd chunks, which can still be free'd.
-        if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
-            Origin != Chunk::Origin::Malloc)
-          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
-                                    Header.OriginOrWasZeroed, Origin);
-      }
-    }
-
-    const uptr Size = getSize(Ptr, &Header);
-    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
-      if (UNLIKELY(DeleteSize != Size))
-        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
-    }
-
-    quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
-  }
-
-  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
-    initThreadMaybe();
-
-    const Options Options = Primary.Options.load();
-    if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
-      if (Options.get(OptionBit::MayReturnNull))
-        return nullptr;
-      reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
-    }
-
-    // The following cases are handled by the C wrappers.
-    DCHECK_NE(OldPtr, nullptr);
-    DCHECK_NE(NewSize, 0);
-
-#ifdef GWP_ASAN_HOOKS
-    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
-      uptr OldSize = GuardedAlloc.getSize(OldPtr);
-      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
-      if (NewPtr)
-        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
-      GuardedAlloc.deallocate(OldPtr);
-      Stats.lock();
-      Stats.add(StatFree, GuardedAllocSlotSize);
-      Stats.sub(StatAllocated, GuardedAllocSlotSize);
-      Stats.unlock();
-      return NewPtr;
-    }
-#endif // GWP_ASAN_HOOKS
-
-    void *OldTaggedPtr = OldPtr;
-    OldPtr = getHeaderTaggedPointer(OldPtr);
-
-    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
-      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
-
-    Chunk::UnpackedHeader Header;
-    Chunk::loadHeader(Cookie, OldPtr, &Header);
-
-    if (UNLIKELY(Header.State != Chunk::State::Allocated))
-      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
-
-    // Pointer has to be allocated with a malloc-type function. Some
-    // applications think that it is OK to realloc a memalign'ed pointer, which
-    // will trigger this check. It really isn't.
-    if (Options.get(OptionBit::DeallocTypeMismatch)) {
-      if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
-        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
-                                  Header.OriginOrWasZeroed,
-                                  Chunk::Origin::Malloc);
-    }
-
-    void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
-    uptr BlockEnd;
-    uptr OldSize;
-    const uptr ClassId = Header.ClassId;
-    if (LIKELY(ClassId)) {
-      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
-                 SizeClassMap::getSizeByClassId(ClassId);
-      OldSize = Header.SizeOrUnusedBytes;
-    } else {
-      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
-      OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
-                            Header.SizeOrUnusedBytes);
-    }
-    // If the new chunk still fits in the previously allocated block (with a
-    // reasonable delta), we just keep the old block, and update the chunk
-    // header to reflect the size change.
-    if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
-      if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
-        Header.SizeOrUnusedBytes =
-            (ClassId ? NewSize
-                     : BlockEnd -
-                           (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
-            Chunk::SizeOrUnusedBytesMask;
-        Chunk::storeHeader(Cookie, OldPtr, &Header);
-        if (UNLIKELY(useMemoryTagging<Config>(Options))) {
-          if (ClassId) {
-            resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
-                              reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
-                              NewSize, untagPointer(BlockEnd));
-            storePrimaryAllocationStackMaybe(Options, OldPtr);
-          } else {
-            storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
-          }
-        }
-        return OldTaggedPtr;
-      }
-    }
-
-    // Otherwise we allocate a new one, and deallocate the old one. Some
-    // allocators will allocate an even larger chunk (by a fixed factor) to
-    // allow for potential further in-place realloc. The gains of such a trick
-    // are currently unclear.
-    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
-    if (LIKELY(NewPtr)) {
-      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
-      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
-    }
-    return NewPtr;
-  }
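
The in-place path of reallocate() hinges on a single predicate: the new user size still fits in the block, and a shrink would save less than a page (otherwise deallocating and reallocating lets whole pages be reclaimed). A sketch of just that predicate, with an assumed 4 KiB page and made-up addresses:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uintptr_t;
  const uptr PageSize = 4096;
  auto inPlace = [&](uptr Ptr, uptr BlockEnd, uptr OldSize, uptr NewSize) {
    return Ptr + NewSize <= BlockEnd &&
           (NewSize > OldSize || OldSize - NewSize < PageSize);
  };
  assert(inPlace(0x1000, 0x2000, 256, 512));        // grow within the block
  assert(!inPlace(0x1000, 0x2000, 256, 8192));      // does not fit
  assert(!inPlace(0x100000, 0x110000, 40000, 100)); // shrink saves >= a page
}
```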
-
-  // TODO(kostyak): disable() is currently best-effort. There are some small
-  //                windows of time when an allocation could still succeed after
-  //                this function finishes. We will revisit that later.
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    initThreadMaybe();
-#ifdef GWP_ASAN_HOOKS
-    GuardedAlloc.disable();
-#endif
-    TSDRegistry.disable();
-    Stats.disable();
-    Quarantine.disable();
-    Primary.disable();
-    Secondary.disable();
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    initThreadMaybe();
-    Secondary.enable();
-    Primary.enable();
-    Quarantine.enable();
-    Stats.enable();
-    TSDRegistry.enable();
-#ifdef GWP_ASAN_HOOKS
-    GuardedAlloc.enable();
-#endif
-  }
-
-  // The function returns the number of bytes required to store the statistics,
-  // which might be larger than the number of bytes provided. Note that the
-  // statistics buffer is not necessarily constant between calls to this
-  // function. This can be called with a null buffer or zero size for buffer
-  // sizing purposes.
-  uptr getStats(char *Buffer, uptr Size) {
-    ScopedString Str;
-    const uptr Length = getStats(&Str) + 1;
-    if (Length < Size)
-      Size = Length;
-    if (Buffer && Size) {
-      memcpy(Buffer, Str.data(), Size);
-      Buffer[Size - 1] = '\0';
-    }
-    return Length;
-  }
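
A typical caller uses the two-call protocol: first query the required length with a null buffer, then fetch into a buffer of that size. A minimal usage sketch; `Allocator` is a hypothetical, already-initialized instance of this class:

```cpp
#include <vector>

// Hypothetical caller; `Allocator` is assumed to be initialized elsewhere.
const auto Needed = Allocator.getStats(/*Buffer=*/nullptr, /*Size=*/0);
std::vector<char> Buf(Needed);
Allocator.getStats(Buf.data(), Buf.size());
// Buf now holds a NUL-terminated statistics string.
```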
-
-  void printStats() {
-    ScopedString Str;
-    getStats(&Str);
-    Str.output();
-  }
-
-  void printFragmentationInfo() {
-    ScopedString Str;
-    Primary.getFragmentationInfo(&Str);
-    // Secondary allocator dumps the fragmentation data in getStats().
-    Str.output();
-  }
-
-  void releaseToOS(ReleaseToOS ReleaseType) {
-    initThreadMaybe();
-    if (ReleaseType == ReleaseToOS::ForceAll)
-      drainCaches();
-    Primary.releaseToOS(ReleaseType);
-    Secondary.releaseToOS();
-  }
-
-  // Iterate over all chunks and call a callback for all busy chunks located
-  // within the provided memory range. Said callback must not use this allocator
-  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
-  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
-                         void *Arg) {
-    initThreadMaybe();
-    if (archSupportsMemoryTagging())
-      Base = untagPointer(Base);
-    const uptr From = Base;
-    const uptr To = Base + Size;
-    bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
-                                systemSupportsMemoryTagging();
-    auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
-                   Arg](uptr Block) {
-      if (Block < From || Block >= To)
-        return;
-      uptr Chunk;
-      Chunk::UnpackedHeader Header;
-      if (MayHaveTaggedPrimary) {
-        // A chunk header can either have a zero tag (tagged primary) or the
-        // header tag (secondary, or untagged primary). We don't know which, so
-        // try both.
-        ScopedDisableMemoryTagChecks x;
-        if (!getChunkFromBlock(Block, &Chunk, &Header) &&
-            !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
-          return;
-      } else {
-        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
-          return;
-      }
-      if (Header.State == Chunk::State::Allocated) {
-        uptr TaggedChunk = Chunk;
-        if (allocatorSupportsMemoryTagging<Config>())
-          TaggedChunk = untagPointer(TaggedChunk);
-        if (useMemoryTagging<Config>(Primary.Options.load()))
-          TaggedChunk = loadTag(Chunk);
-        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
-                 Arg);
-      }
-    };
-    Primary.iterateOverBlocks(Lambda);
-    Secondary.iterateOverBlocks(Lambda);
-#ifdef GWP_ASAN_HOOKS
-    GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
-#endif
-  }
-
-  bool canReturnNull() {
-    initThreadMaybe();
-    return Primary.Options.load().get(OptionBit::MayReturnNull);
-  }
-
-  bool setOption(Option O, sptr Value) {
-    initThreadMaybe();
-    if (O == Option::MemtagTuning) {
-      // Enabling odd/even tags involves a tradeoff between use-after-free
-      // detection and buffer overflow detection. Odd/even tags make it more
-      // likely for buffer overflows to be detected by increasing the size of
-      // the guaranteed "red zone" around the allocation, but on the other hand
-      // use-after-free is less likely to be detected because the tag space for
-      // any particular chunk is cut in half. Therefore we use this tuning
-      // setting to control whether odd/even tags are enabled.
-      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
-        Primary.Options.set(OptionBit::UseOddEvenTags);
-      else if (Value == M_MEMTAG_TUNING_UAF)
-        Primary.Options.clear(OptionBit::UseOddEvenTags);
-      return true;
-    } else {
-      // We leave it to the various sub-components to decide whether or not they
-      // want to handle the option, but we do not want to short-circuit
-      // execution if one of the setOption calls were to return false.
-      const bool PrimaryResult = Primary.setOption(O, Value);
-      const bool SecondaryResult = Secondary.setOption(O, Value);
-      const bool RegistryResult = TSDRegistry.setOption(O, Value);
-      return PrimaryResult && SecondaryResult && RegistryResult;
-    }
-    return false;
-  }
-
-  // Return the usable size for a given chunk. Technically we lie, as we just
-  // report the actual size of a chunk. This is done to counteract code actively
-  // writing past the end of a chunk (like sqlite3) when the usable size allows
-  // for it, which then forces realloc to copy the usable size of a chunk as
-  // opposed to its actual size.
-  uptr getUsableSize(const void *Ptr) {
-    if (UNLIKELY(!Ptr))
-      return 0;
-
-    return getAllocSize(Ptr);
-  }
-
-  uptr getAllocSize(const void *Ptr) {
-    initThreadMaybe();
-
-#ifdef GWP_ASAN_HOOKS
-    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
-      return GuardedAlloc.getSize(Ptr);
-#endif // GWP_ASAN_HOOKS
-
-    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
-    Chunk::UnpackedHeader Header;
-    Chunk::loadHeader(Cookie, Ptr, &Header);
-
-    // Getting the alloc size of a chunk only makes sense if it's allocated.
-    if (UNLIKELY(Header.State != Chunk::State::Allocated))
-      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
-
-    return getSize(Ptr, &Header);
-  }
-
-  void getStats(StatCounters S) {
-    initThreadMaybe();
-    Stats.get(S);
-  }
-
-  // Returns true if the pointer provided was allocated by the current
-  // allocator instance, which is compliant with tcmalloc's ownership concept.
-  // A corrupted chunk will not be reported as owned, which is WAI.
-  bool isOwned(const void *Ptr) {
-    initThreadMaybe();
-#ifdef GWP_ASAN_HOOKS
-    if (GuardedAlloc.pointerIsMine(Ptr))
-      return true;
-#endif // GWP_ASAN_HOOKS
-    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
-      return false;
-    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
-    Chunk::UnpackedHeader Header;
-    return Chunk::isValid(Cookie, Ptr, &Header) &&
-           Header.State == Chunk::State::Allocated;
-  }
-
-  bool useMemoryTaggingTestOnly() const {
-    return useMemoryTagging<Config>(Primary.Options.load());
-  }
-  void disableMemoryTagging() {
-    // If we haven't been initialized yet, we need to initialize now in order to
-    // prevent a future call to initThreadMaybe() from enabling memory tagging
-    // based on feature detection. But don't call initThreadMaybe() because it
-    // may end up calling the allocator (via pthread_atfork, via the post-init
-    // callback), which may cause mappings to be created with memory tagging
-    // enabled.
-    TSDRegistry.initOnceMaybe(this);
-    if (allocatorSupportsMemoryTagging<Config>()) {
-      Secondary.disableMemoryTagging();
-      Primary.Options.clear(OptionBit::UseMemoryTagging);
-    }
-  }
-
-  void setTrackAllocationStacks(bool Track) {
-    initThreadMaybe();
-    if (getFlags()->allocation_ring_buffer_size <= 0) {
-      DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
-      return;
-    }
-    if (Track)
-      Primary.Options.set(OptionBit::TrackAllocationStacks);
-    else
-      Primary.Options.clear(OptionBit::TrackAllocationStacks);
-  }
-
-  void setFillContents(FillContentsMode FillContents) {
-    initThreadMaybe();
-    Primary.Options.setFillContentsMode(FillContents);
-  }
-
-  void setAddLargeAllocationSlack(bool AddSlack) {
-    initThreadMaybe();
-    if (AddSlack)
-      Primary.Options.set(OptionBit::AddLargeAllocationSlack);
-    else
-      Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
-  }
-
-  const char *getStackDepotAddress() const {
-    return reinterpret_cast<const char *>(&Depot);
-  }
-
-  const char *getRegionInfoArrayAddress() const {
-    return Primary.getRegionInfoArrayAddress();
-  }
-
-  static uptr getRegionInfoArraySize() {
-    return PrimaryT::getRegionInfoArraySize();
-  }
-
-  const char *getRingBufferAddress() {
-    initThreadMaybe();
-    return RawRingBuffer;
-  }
-
-  uptr getRingBufferSize() {
-    initThreadMaybe();
-    return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
-  }
-
-  static const uptr MaxTraceSize = 64;
-
-  static void collectTraceMaybe(const StackDepot *Depot,
-                                uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
-    uptr RingPos, Size;
-    if (!Depot->find(Hash, &RingPos, &Size))
-      return;
-    for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
-      Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
-  }
-
-  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
-                           uintptr_t FaultAddr, const char *DepotPtr,
-                           const char *RegionInfoPtr, const char *RingBufferPtr,
-                           size_t RingBufferSize, const char *Memory,
-                           const char *MemoryTags, uintptr_t MemoryAddr,
-                           size_t MemorySize) {
-    *ErrorInfo = {};
-    if (!allocatorSupportsMemoryTagging<Config>() ||
-        MemoryAddr + MemorySize < MemoryAddr)
-      return;
-
-    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
-    size_t NextErrorReport = 0;
-
-    // Check for OOB in the current block and the two surrounding blocks. Beyond
-    // that, UAF is more likely.
-    if (extractTag(FaultAddr) != 0)
-      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
-                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
-                         MemorySize, 0, 2);
-
-    // Check the ring buffer. For primary allocations this will only find UAF;
-    // for secondary allocations we can find either UAF or OOB.
-    getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
-                           RingBufferPtr, RingBufferSize);
-
-    // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
-    // Beyond that we are likely to hit false positives.
-    if (extractTag(FaultAddr) != 0)
-      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
-                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
-                         MemorySize, 2, 16);
-  }
-
-private:
-  typedef typename PrimaryT::SizeClassMap SizeClassMap;
-
-  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
-  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
-  static const uptr MinAlignment = 1UL << MinAlignmentLog;
-  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
-  static const uptr MaxAllowedMallocSize =
-      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
-
-  static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
-                "Minimal alignment must at least cover a chunk header.");
-  static_assert(!allocatorSupportsMemoryTagging<Config>() ||
-                    MinAlignment >= archMemoryTagGranuleSize(),
-                "");
-
-  static const u32 BlockMarker = 0x44554353U;
-
-  // These are indexes into an "array" of 32-bit values that store information
-  // inline with a chunk that is relevant to diagnosing memory tag faults, where
-  // 0 corresponds to the address of the user memory. This means that only
-  // negative indexes may be used. The smallest index that may be used is -2,
-  // which corresponds to 8 bytes before the user memory, because the chunk
-  // header size is 8 bytes and in allocators that support memory tagging the
-  // minimum alignment is at least the tag granule size (16 on aarch64).
-  static const sptr MemTagAllocationTraceIndex = -2;
-  static const sptr MemTagAllocationTidIndex = -1;
-
-  u32 Cookie = 0;
-  u32 QuarantineMaxChunkSize = 0;
-
-  GlobalStats Stats;
-  PrimaryT Primary;
-  SecondaryT Secondary;
-  QuarantineT Quarantine;
-  TSDRegistryT TSDRegistry;
-  pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
-
-#ifdef GWP_ASAN_HOOKS
-  gwp_asan::GuardedPoolAllocator GuardedAlloc;
-  uptr GuardedAllocSlotSize = 0;
-#endif // GWP_ASAN_HOOKS
-
-  StackDepot Depot;
-
-  struct AllocationRingBuffer {
-    struct Entry {
-      atomic_uptr Ptr;
-      atomic_uptr AllocationSize;
-      atomic_u32 AllocationTrace;
-      atomic_u32 AllocationTid;
-      atomic_u32 DeallocationTrace;
-      atomic_u32 DeallocationTid;
-    };
-
-    atomic_uptr Pos;
-    // An array of Size (at least one) elements of type Entry immediately
-    // follows this struct.
-  };
-  // Pointer to the memory-mapped area starting with the AllocationRingBuffer
-  // struct, immediately followed by Size elements of type Entry.
-  char *RawRingBuffer = {};
-  u32 RingBufferElements = 0;
-  MemMapT RawRingBufferMap;
-
-  // The following might get optimized out by the compiler.
-  NOINLINE void performSanityChecks() {
-    // Verify that the header offset field can hold the maximum offset. In the
-    // case of the Secondary allocator, it takes care of alignment and the
-    // offset will always be small. In the case of the Primary, the worst case
-    // scenario happens in the last size class, when the backend allocation
-    // would already be aligned on the requested alignment, which would happen
-    // to be the maximum alignment that would fit in that size class. As a
-    // result, the maximum offset will be at most the maximum alignment for the
-    // last size class minus the header size, in multiples of MinAlignment.
-    Chunk::UnpackedHeader Header = {};
-    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
-                                         SizeClassMap::MaxSize - MinAlignment);
-    const uptr MaxOffset =
-        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
-    Header.Offset = MaxOffset & Chunk::OffsetMask;
-    if (UNLIKELY(Header.Offset != MaxOffset))
-      reportSanityCheckError("offset");
-
-    // Verify that we can fit the maximum size or amount of unused bytes in the
-    // header. Given that the Secondary fits the allocation to a page, the worst
-    // case scenario happens in the Primary. It will depend on the second to
-    // last and last class sizes, as well as the dynamic base for the Primary.
-    // The following is an over-approximation that works for our needs.
-    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
-    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
-    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
-      reportSanityCheckError("size (or unused bytes)");
-
-    const uptr LargestClassId = SizeClassMap::LargestClassId;
-    Header.ClassId = LargestClassId;
-    if (UNLIKELY(Header.ClassId != LargestClassId))
-      reportSanityCheckError("class ID");
-  }
-
-  static inline void *getBlockBegin(const void *Ptr,
-                                    Chunk::UnpackedHeader *Header) {
-    return reinterpret_cast<void *>(
-        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
-        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
-  }
-
-  // Return the size of a chunk as requested during its allocation.
-  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
-    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
-    if (LIKELY(Header->ClassId))
-      return SizeOrUnusedBytes;
-    if (allocatorSupportsMemoryTagging<Config>())
-      Ptr = untagPointer(const_cast<void *>(Ptr));
-    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
-           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
-  }
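
SizeOrUnusedBytes is thus overloaded: Primary chunks store the requested size directly, while Secondary chunks store the unused tail so that arbitrarily large sizes still fit in the header field. A quick check of the Secondary arithmetic with made-up addresses:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Secondary chunk (ClassId == 0): requested size = BlockEnd - Ptr - unused.
  const uint64_t BlockEnd = 0x5000, Ptr = 0x4010, UnusedBytes = 0x30;
  assert(BlockEnd - Ptr - UnusedBytes == 0xFC0); // 4032 bytes were requested
}
```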
-
-  void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
-                                   Chunk::UnpackedHeader *Header,
-                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
-    void *Ptr = getHeaderTaggedPointer(TaggedPtr);
-    // If the quarantine is disabled, or the actual size of the chunk is 0 or
-    // larger than the maximum allowed, return the chunk directly to the backend.
-    // This purposefully underflows for Size == 0.
-    const bool BypassQuarantine = !Quarantine.getCacheSize() ||
-                                  ((Size - 1) >= QuarantineMaxChunkSize) ||
-                                  !Header->ClassId;
-    if (BypassQuarantine)
-      Header->State = Chunk::State::Available;
-    else
-      Header->State = Chunk::State::Quarantined;
-    Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
-                                Header->ClassId &&
-                                !TSDRegistry.getDisableMemInit();
-    Chunk::storeHeader(Cookie, Ptr, Header);
-
-    if (UNLIKELY(useMemoryTagging<Config>(Options))) {
-      u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
-      storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
-      if (Header->ClassId) {
-        if (!TSDRegistry.getDisableMemInit()) {
-          uptr TaggedBegin, TaggedEnd;
-          const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
-              Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
-              Header->ClassId);
-          // Exclude the previous tag so that immediate use after free is
-          // detected 100% of the time.
-          setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
-                       &TaggedEnd);
-        }
-      }
-    }
-    if (BypassQuarantine) {
-      if (allocatorSupportsMemoryTagging<Config>())
-        Ptr = untagPointer(Ptr);
-      void *BlockBegin = getBlockBegin(Ptr, Header);
-      const uptr ClassId = Header->ClassId;
-      if (LIKELY(ClassId)) {
-        bool UnlockRequired;
-        auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-        TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-        const bool CacheDrained =
-            TSD->getCache().deallocate(ClassId, BlockBegin);
-        if (UnlockRequired)
-          TSD->unlock();
-        // When we have drained some blocks back to the Primary from TSD, that
-        // implies that we may have the chance to release some pages as well.
-        // Note that in order not to block other threads accessing the TSD,
-        // release the TSD first, then try the page release.
-        if (CacheDrained)
-          Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
-      } else {
-        if (UNLIKELY(useMemoryTagging<Config>(Options)))
-          storeTags(reinterpret_cast<uptr>(BlockBegin),
-                    reinterpret_cast<uptr>(Ptr));
-        Secondary.deallocate(Options, BlockBegin);
-      }
-    } else {
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-      Quarantine.put(&TSD->getQuarantineCache(),
-                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
-      if (UnlockRequired)
-        TSD->unlock();
-    }
-  }
-
-  bool getChunkFromBlock(uptr Block, uptr *Chunk,
-                         Chunk::UnpackedHeader *Header) {
-    *Chunk =
-        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
-    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
-  }
-
-  static uptr getChunkOffsetFromBlock(const char *Block) {
-    u32 Offset = 0;
-    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
-      Offset = reinterpret_cast<const u32 *>(Block)[1];
-    return Offset + Chunk::getHeaderSize();
-  }
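
For reference, BlockMarker (0x44554353) is the byte sequence "SCUD" when read little-endian. A self-contained model of the lookup above, with an assumed 8-byte header (matching the layout comment elsewhere in this file) and a hypothetical 16-byte offset:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint32_t BlockMarker = 0x44554353u; // "SCUD" on little-endian targets
  const uint32_t HeaderSize = 8;       // stand-in for Chunk::getHeaderSize()
  alignas(16) char Block[64] = {};

  // allocate() stored the marker and the offset in the first two u32 words.
  const uint32_t Words[2] = {BlockMarker, 16};
  std::memcpy(Block, Words, sizeof(Words));

  // Equivalent of getChunkOffsetFromBlock():
  uint32_t Offset = 0;
  if (reinterpret_cast<const uint32_t *>(Block)[0] == BlockMarker)
    Offset = reinterpret_cast<const uint32_t *>(Block)[1];
  assert(Offset + HeaderSize == 24); // the chunk starts 24 bytes in
}
```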
-
-  // Set the tag of the granule past the end of the allocation to 0, to catch
-  // linear overflows even if a previous larger allocation used the same block
-  // and tag. Only do this if the granule past the end is in our block, because
-  // this would otherwise lead to a SEGV if the allocation covers the entire
-  // block and our block is at the end of a mapping. The tag of the next block's
-  // header granule will be set to 0, so it will serve the purpose of catching
-  // linear overflows in this case.
-  //
-  // For allocations of size 0 we do not end up storing the address tag to the
-  // memory tag space, which getInlineErrorInfo() normally relies on to match
-  // address tags against chunks. To allow matching in this case we store the
-  // address tag in the first byte of the chunk.
-  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
-    DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
-    uptr UntaggedEnd = untagPointer(End);
-    if (UntaggedEnd != BlockEnd) {
-      storeTag(UntaggedEnd);
-      if (Size == 0)
-        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
-    }
-  }
-
-  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
-                           uptr BlockEnd) {
-    // Prepare the granule before the chunk to store the chunk header by setting
-    // its tag to 0. Normally its tag will already be 0, but in the case where a
-    // chunk holding a low alignment allocation is reused for a higher alignment
-    // allocation, the chunk may already have a non-zero tag from the previous
-    // allocation.
-    storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
-
-    uptr TaggedBegin, TaggedEnd;
-    setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
-
-    storeEndMarker(TaggedEnd, Size, BlockEnd);
-    return reinterpret_cast<void *>(TaggedBegin);
-  }
-
-  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
-                         uptr BlockEnd) {
-    uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
-    uptr RoundNewPtr;
-    if (RoundOldPtr >= NewPtr) {
-      // If the allocation is shrinking we just need to set the tag past the end
-      // of the allocation to 0. See explanation in storeEndMarker() above.
-      RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
-    } else {
-      // Set the memory tag of the region
-      // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
-      // to the pointer tag stored in OldPtr.
-      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
-    }
-    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
-  }
-
-  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
-      return;
-    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
-    Ptr32[MemTagAllocationTidIndex] = getThreadID();
-  }
-
-  void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
-                            uptr AllocationSize, u32 DeallocationTrace,
-                            u32 DeallocationTid) {
-    uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
-    typename AllocationRingBuffer::Entry *Entry =
-        getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
-
-    // First invalidate our entry so that we don't attempt to interpret a
-    // partially written state in getRingBufferErrorInfo(). The fences below
-    // ensure that the compiler does not move the stores to Ptr in between the
-    // stores to the other fields.
-    atomic_store_relaxed(&Entry->Ptr, 0);
-
-    __atomic_signal_fence(__ATOMIC_SEQ_CST);
-    atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
-    atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
-    atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
-    atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
-    atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
-    __atomic_signal_fence(__ATOMIC_SEQ_CST);
-
-    atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
-  }
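
The protocol is worth spelling out: readers treat Ptr == 0 as "no entry", so the writer invalidates first, fills the payload, and publishes last, with signal fences (compiler-only barriers, as in the original) preventing reordering. The same pattern in miniature, with a single stand-in payload field:

```cpp
#include <atomic>
#include <cstdint>

struct Entry {
  std::atomic<uintptr_t> Ptr{0};
  std::atomic<uint64_t> Payload{0}; // stands in for the five other fields
};

void publish(Entry &E, uintptr_t P, uint64_t Payload) {
  E.Ptr.store(0, std::memory_order_relaxed);           // 1. invalidate
  std::atomic_signal_fence(std::memory_order_seq_cst); //    keep order
  E.Payload.store(Payload, std::memory_order_relaxed); // 2. fill fields
  std::atomic_signal_fence(std::memory_order_seq_cst); //    keep order
  E.Ptr.store(P, std::memory_order_relaxed);           // 3. publish
}
```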
-
-  void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
-                                          uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
-      return;
-
-    u32 Trace = collectStackTrace();
-    u32 Tid = getThreadID();
-
-    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = Trace;
-    Ptr32[MemTagAllocationTidIndex] = Tid;
-
-    storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
-  }
-
-  void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
-                                   u8 PrevTag, uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
-      return;
-
-    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
-    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
-
-    u32 DeallocationTrace = collectStackTrace();
-    u32 DeallocationTid = getThreadID();
-
-    storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
-                         AllocationTrace, AllocationTid, Size,
-                         DeallocationTrace, DeallocationTid);
-  }
-
-  static const size_t NumErrorReports =
-      sizeof(((scudo_error_info *)nullptr)->reports) /
-      sizeof(((scudo_error_info *)nullptr)->reports[0]);
-
-  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
-                                 size_t &NextErrorReport, uintptr_t FaultAddr,
-                                 const StackDepot *Depot,
-                                 const char *RegionInfoPtr, const char *Memory,
-                                 const char *MemoryTags, uintptr_t MemoryAddr,
-                                 size_t MemorySize, size_t MinDistance,
-                                 size_t MaxDistance) {
-    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
-    u8 FaultAddrTag = extractTag(FaultAddr);
-    BlockInfo Info =
-        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
-
-    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
-      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
-          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
-        return false;
-      *Data = &Memory[Addr - MemoryAddr];
-      *Tag = static_cast<u8>(
-          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
-      return true;
-    };
-
-    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
-                         Chunk::UnpackedHeader *Header, const u32 **Data,
-                         u8 *Tag) {
-      const char *BlockBegin;
-      u8 BlockBeginTag;
-      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
-        return false;
-      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
-      *ChunkAddr = Addr + ChunkOffset;
-
-      const char *ChunkBegin;
-      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
-        return false;
-      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
-          ChunkBegin - Chunk::getHeaderSize());
-      *Data = reinterpret_cast<const u32 *>(ChunkBegin);
-
-      // Allocations of size 0 will have stashed the tag in the first byte of
-      // the chunk, see storeEndMarker().
-      if (Header->SizeOrUnusedBytes == 0)
-        *Tag = static_cast<u8>(*ChunkBegin);
-
-      return true;
-    };
-
-    if (NextErrorReport == NumErrorReports)
-      return;
-
-    auto CheckOOB = [&](uptr BlockAddr) {
-      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
-        return false;
-
-      uptr ChunkAddr;
-      Chunk::UnpackedHeader Header;
-      const u32 *Data;
-      uint8_t Tag;
-      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
-          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
-        return false;
-
-      auto *R = &ErrorInfo->reports[NextErrorReport++];
-      R->error_type =
-          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
-      R->allocation_address = ChunkAddr;
-      R->allocation_size = Header.SizeOrUnusedBytes;
-      collectTraceMaybe(Depot, R->allocation_trace,
-                        Data[MemTagAllocationTraceIndex]);
-      R->allocation_tid = Data[MemTagAllocationTidIndex];
-      return NextErrorReport == NumErrorReports;
-    };
-
-    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
-      return;
-
-    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
-      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
-          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
-        return;
-  }
-
-  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
-                                     size_t &NextErrorReport,
-                                     uintptr_t FaultAddr,
-                                     const StackDepot *Depot,
-                                     const char *RingBufferPtr,
-                                     size_t RingBufferSize) {
-    auto *RingBuffer =
-        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
-    size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
-    if (!RingBuffer || RingBufferElements == 0)
-      return;
-    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
-
-    for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
-                           NextErrorReport != NumErrorReports;
-         --I) {
-      auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
-      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
-      if (!EntryPtr)
-        continue;
-
-      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
-      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
-      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
-      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
-      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
-      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
-
-      if (DeallocationTid) {
-        // For UAF we only consider in-bounds fault addresses because
-        // out-of-bounds UAF is rare and attempting to detect it is very likely
-        // to result in false positives.
-        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
-          continue;
-      } else {
-        // Ring buffer OOB is only possible with secondary allocations. In this
-        // case we are guaranteed a guard region of at least a page on either
-        // side of the allocation (guard page on the right, guard page + tagged
-        // region on the left), so ignore any faults outside of that range.
-        if (FaultAddr < EntryPtr - getPageSizeCached() ||
-            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
-          continue;
-
-        // For UAF the ring buffer will contain two entries, one for the
-        // allocation and another for the deallocation. Don't report buffer
-        // overflow/underflow using the allocation entry if we have already
-        // collected a report from the deallocation entry.
-        bool Found = false;
-        for (uptr J = 0; J != NextErrorReport; ++J) {
-          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
-            Found = true;
-            break;
-          }
-        }
-        if (Found)
-          continue;
-      }
-
-      auto *R = &ErrorInfo->reports[NextErrorReport++];
-      if (DeallocationTid)
-        R->error_type = USE_AFTER_FREE;
-      else if (FaultAddr < EntryPtr)
-        R->error_type = BUFFER_UNDERFLOW;
-      else
-        R->error_type = BUFFER_OVERFLOW;
-
-      R->allocation_address = UntaggedEntryPtr;
-      R->allocation_size = EntrySize;
-      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
-      R->allocation_tid = AllocationTid;
-      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
-      R->deallocation_tid = DeallocationTid;
-    }
-  }
-
-  uptr getStats(ScopedString *Str) {
-    Primary.getStats(Str);
-    Secondary.getStats(Str);
-    Quarantine.getStats(Str);
-    TSDRegistry.getStats(Str);
-    return Str->length();
-  }
-
-  static typename AllocationRingBuffer::Entry *
-  getRingBufferEntry(char *RawRingBuffer, uptr N) {
-    return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
-        &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
-  }
-  static const typename AllocationRingBuffer::Entry *
-  getRingBufferEntry(const char *RawRingBuffer, uptr N) {
-    return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
-        &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
-  }
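
Entries live in a flexible-array-style region directly after the header struct, so entry N sits at RawRingBuffer + sizeof(AllocationRingBuffer) + N * sizeof(Entry). A check of that arithmetic with stand-in types:

```cpp
#include <cassert>
#include <cstddef>

struct Header { unsigned long Pos; };      // stand-in for AllocationRingBuffer
struct Entry { unsigned long Fields[6]; }; // stand-in for its Entry

int main() {
  alignas(Entry) char Raw[sizeof(Header) + 4 * sizeof(Entry)] = {};
  // Same arithmetic as getRingBufferEntry(): skip the header, then index.
  Entry *E2 = &reinterpret_cast<Entry *>(&Raw[sizeof(Header)])[2];
  assert(reinterpret_cast<char *>(E2) ==
         Raw + sizeof(Header) + 2 * sizeof(Entry));
}
```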
-
-  void mapAndInitializeRingBuffer() {
-    if (getFlags()->allocation_ring_buffer_size <= 0)
-      return;
-    u32 AllocationRingBufferSize =
-        static_cast<u32>(getFlags()->allocation_ring_buffer_size);
-    MemMapT MemMap;
-    MemMap.map(
-        /*Addr=*/0U,
-        roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
-                getPageSizeCached()),
-        "scudo:ring_buffer");
-    RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
-    RawRingBufferMap = MemMap;
-    RingBufferElements = AllocationRingBufferSize;
-    static_assert(sizeof(AllocationRingBuffer) %
-                          alignof(typename AllocationRingBuffer::Entry) ==
-                      0,
-                  "invalid alignment");
-  }
-
-  void unmapRingBuffer() {
-    auto *RingBuffer = getRingBuffer();
-    if (RingBuffer != nullptr) {
-      RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
-                             RawRingBufferMap.getCapacity());
-    }
-    RawRingBuffer = nullptr;
-  }
-
-  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
-    return sizeof(AllocationRingBuffer) +
-           RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
-  }
-
-  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
-    if (Bytes < sizeof(AllocationRingBuffer)) {
-      return 0;
-    }
-    return (Bytes - sizeof(AllocationRingBuffer)) /
-           sizeof(typename AllocationRingBuffer::Entry);
-  }
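
These two helpers are inverses for any element count, which is what lets getRingBufferErrorInfo() recover the element count from a raw byte size. A property check using assumed stand-in sizes (16-byte header, 32-byte entries):

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t HeaderSize = 16, EntrySize = 32; // stand-in sizes
constexpr size_t toBytes(size_t N) { return HeaderSize + N * EntrySize; }
constexpr size_t toElements(size_t B) {
  return B < HeaderSize ? 0 : (B - HeaderSize) / EntrySize;
}

int main() {
  // elements -> bytes -> elements round-trips to the identity.
  for (size_t N = 1; N < 1000; ++N)
    assert(toElements(toBytes(N)) == N);
}
```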
-
-  inline AllocationRingBuffer *getRingBuffer() {
-    return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_COMBINED_H_
diff --git a/Telegram/ThirdParty/scudo/common.cpp b/Telegram/ThirdParty/scudo/common.cpp
deleted file mode 100644
index 06e930638..000000000
--- a/Telegram/ThirdParty/scudo/common.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- common.cpp ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "common.h"
-#include "atomic_helpers.h"
-#include "string_utils.h"
-
-namespace scudo {
-
-uptr PageSizeCached;
-uptr getPageSize();
-
-uptr getPageSizeSlow() {
-  PageSizeCached = getPageSize();
-  CHECK_NE(PageSizeCached, 0);
-  return PageSizeCached;
-}
-
-} // namespace scudo
diff --git a/Telegram/ThirdParty/scudo/common.h b/Telegram/ThirdParty/scudo/common.h
deleted file mode 100644
index ae45683f1..000000000
--- a/Telegram/ThirdParty/scudo/common.h
+++ /dev/null
@@ -1,232 +0,0 @@
-//===-- common.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_COMMON_H_
-#define SCUDO_COMMON_H_
-
-#include "internal_defs.h"
-
-#include "fuchsia.h"
-#include "linux.h"
-#include "trusty.h"
-
-#include <stddef.h>
-#include <string.h>
-#include <unistd.h>
-
-namespace scudo {
-
-template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
-  static_assert(sizeof(Dest) == sizeof(Source), "");
-  Dest D;
-  memcpy(&D, &S, sizeof(D));
-  return D;
-}
-
-inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-
-inline constexpr uptr roundUp(uptr X, uptr Boundary) {
-  DCHECK(isPowerOfTwo(Boundary));
-  return (X + Boundary - 1) & ~(Boundary - 1);
-}
-inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
-  return ((X + Boundary - 1) / Boundary) * Boundary;
-}
-
-inline constexpr uptr roundDown(uptr X, uptr Boundary) {
-  DCHECK(isPowerOfTwo(Boundary));
-  return X & ~(Boundary - 1);
-}
-inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
-  return (X / Boundary) * Boundary;
-}
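
The masked forms only work for power-of-two boundaries (hence the DCHECKs), but where they apply they agree with the division-based slow paths. A quick exhaustive check over small inputs:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uintptr_t;
  const uptr B = 16; // must be a power of two for the masked versions
  for (uptr X = 0; X < 256; ++X) {
    assert(((X + B - 1) & ~(B - 1)) == ((X + B - 1) / B) * B); // roundUp
    assert((X & ~(B - 1)) == (X / B) * B);                     // roundDown
  }
}
```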
-
-inline constexpr bool isAligned(uptr X, uptr Alignment) {
-  DCHECK(isPowerOfTwo(Alignment));
-  return (X & (Alignment - 1)) == 0;
-}
-inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
-  return X % Alignment == 0;
-}
-
-template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
-
-template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
-
-template <class T> void Swap(T &A, T &B) {
-  T Tmp = A;
-  A = B;
-  B = Tmp;
-}
-
-inline uptr getMostSignificantSetBitIndex(uptr X) {
-  DCHECK_NE(X, 0U);
-  return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
-}
-
-inline uptr roundUpPowerOfTwo(uptr Size) {
-  DCHECK(Size);
-  if (isPowerOfTwo(Size))
-    return Size;
-  const uptr Up = getMostSignificantSetBitIndex(Size);
-  DCHECK_LT(Size, (1UL << (Up + 1)));
-  DCHECK_GT(Size, (1UL << Up));
-  return 1UL << (Up + 1);
-}
-
-inline uptr getLeastSignificantSetBitIndex(uptr X) {
-  DCHECK_NE(X, 0U);
-  return static_cast<uptr>(__builtin_ctzl(X));
-}
-
-inline uptr getLog2(uptr X) {
-  DCHECK(isPowerOfTwo(X));
-  return getLeastSignificantSetBitIndex(X);
-}
-
-inline u32 getRandomU32(u32 *State) {
-  // ANSI C linear congruential PRNG (16-bit output).
-  // return (*State = *State * 1103515245 + 12345) >> 16;
-  // XorShift (32-bit output).
-  *State ^= *State << 13;
-  *State ^= *State >> 17;
-  *State ^= *State << 5;
-  return *State;
-}
-
-inline u32 getRandomModN(u32 *State, u32 N) {
-  return getRandomU32(State) % N; // [0, N)
-}
-
-template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
-  if (N <= 1)
-    return;
-  u32 State = *RandState;
-  for (u32 I = N - 1; I > 0; I--)
-    Swap(A[I], A[getRandomModN(&State, I + 1)]);
-  *RandState = State;
-}
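
getRandomU32() is the classic XorShift32 generator, and shuffle() is a Fisher-Yates shuffle driven by it. A standalone version of the pair; note that XorShift must be seeded non-zero, or the state stays stuck at zero:

```cpp
#include <cassert>
#include <cstdint>

uint32_t xorshift32(uint32_t *State) {
  *State ^= *State << 13;
  *State ^= *State >> 17;
  *State ^= *State << 5;
  return *State;
}

int main() {
  int A[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint32_t State = 0x12345678u; // non-zero seed
  for (uint32_t I = 8 - 1; I > 0; I--) {
    const uint32_t J = xorshift32(&State) % (I + 1);
    const int Tmp = A[I]; A[I] = A[J]; A[J] = Tmp;
  }
  // Necessary (if weak) sanity check: still a permutation of 0..7.
  int Sum = 0;
  for (int V : A) Sum += V;
  assert(Sum == 28);
}
```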
-
-inline void computePercentage(uptr Numerator, uptr Denominator, uptr *Integral,
-                              uptr *Fractional) {
-  constexpr uptr Digits = 100;
-  if (Denominator == 0) {
-    *Integral = 100;
-    *Fractional = 0;
-    return;
-  }
-
-  *Integral = Numerator * Digits / Denominator;
-  *Fractional =
-      (((Numerator * Digits) % Denominator) * Digits + Denominator / 2) /
-      Denominator;
-}
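
The Denominator / 2 term rounds the two-digit fractional part to nearest. Tracing the same formula for 1/3:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uintptr_t;
  const uptr Numerator = 1, Denominator = 3, Digits = 100;
  const uptr Integral = Numerator * Digits / Denominator; // 100 / 3 = 33
  const uptr Fractional =
      (((Numerator * Digits) % Denominator) * Digits + Denominator / 2) /
      Denominator;                                        // (100 + 1) / 3 = 33
  assert(Integral == 33 && Fractional == 33);             // i.e. 33.33%
}
```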
-
-// Platform specific functions.
-
-extern uptr PageSizeCached;
-uptr getPageSizeSlow();
-inline uptr getPageSizeCached() {
-#if SCUDO_ANDROID && defined(PAGE_SIZE)
-  // Most Android builds have a build-time constant page size.
-  return PAGE_SIZE;
-#endif
-  if (LIKELY(PageSizeCached))
-    return PageSizeCached;
-  return getPageSizeSlow();
-}
-
-// Returns 0 if the number of CPUs could not be determined.
-u32 getNumberOfCPUs();
-
-const char *getEnv(const char *Name);
-
-u64 getMonotonicTime();
-// Gets the time faster but with less accuracy. May fall back to
-// getMonotonicTime if no fast version is available.
-u64 getMonotonicTimeFast();
-
-u32 getThreadID();
-
-// Our randomness gathering function is limited to 256 bytes, both to ensure
-// we get as many bytes as requested and to avoid interruptions (on Linux).
-constexpr uptr MaxRandomLength = 256U;
-bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
-
-// Platform memory mapping functions.
-
-#define MAP_ALLOWNOMEM (1U << 0)
-#define MAP_NOACCESS (1U << 1)
-#define MAP_RESIZABLE (1U << 2)
-#define MAP_MEMTAG (1U << 3)
-#define MAP_PRECOMMIT (1U << 4)
-
-// Our platform memory mapping use is restricted to 3 scenarios:
-// - reserve memory at a random address (MAP_NOACCESS);
-// - commit memory in a previously reserved space;
-// - commit memory at a random address.
-// As such, only a subset of parameter combinations is valid, which is checked
-// by the function implementation. The Data parameter allows passing opaque
-// platform-specific data to the function.
-// Returns nullptr on error if MAP_ALLOWNOMEM is specified, and dies otherwise.
-void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
-          MapPlatformData *Data = nullptr);
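
A hedged sketch of the reserve-then-commit pattern described above; the helper name, sizes, and mapping names are illustrative only, not scudo's actual call sites:

#include "common.h"

namespace scudo {
// Hypothetical helper: reserve an inaccessible region, then commit its first
// Size bytes in place. The same Data must be threaded through both calls so
// platforms that need it (e.g. Fuchsia) can find the reservation again.
void *reserveThenCommit(uptr ReserveSize, uptr Size) {
  MapPlatformData Data = {};
  void *Base = map(nullptr, ReserveSize, "example:reserve",
                   MAP_NOACCESS | MAP_ALLOWNOMEM, &Data);
  if (!Base)
    return nullptr;
  return map(Base, Size, "example:commit", 0, &Data);
}
} // namespace scudo
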
-
-// Indicates that we are getting rid of the whole mapping, which might have
-// further consequences on Data, depending on the platform.
-#define UNMAP_ALL (1U << 0)
-
-void unmap(void *Addr, uptr Size, uptr Flags = 0,
-           MapPlatformData *Data = nullptr);
-
-void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
-                         MapPlatformData *Data = nullptr);
-
-void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
-                      MapPlatformData *Data = nullptr);
-
-// Logging related functions.
-
-void setAbortMessage(const char *Message);
-
-struct BlockInfo {
-  uptr BlockBegin;
-  uptr BlockSize;
-  uptr RegionBegin;
-  uptr RegionEnd;
-};
-
-enum class Option : u8 {
-  ReleaseInterval,      // Release to OS interval in milliseconds.
-  MemtagTuning,         // Whether to tune tagging for UAF or overflow.
-  ThreadDisableMemInit, // Whether to disable automatic heap initialization and,
-                        // where possible, memory tagging, on this thread.
-  MaxCacheEntriesCount, // Maximum number of blocks that can be cached.
-  MaxCacheEntrySize,    // Maximum size of a block that can be cached.
-  MaxTSDsCount,         // Number of usable TSDs for the shared registry.
-};
-
-enum class ReleaseToOS : u8 {
-  Normal, // Follow the normal rules for releasing pages to the OS
-  Force,  // Force release pages to the OS, but avoid cases that take too long.
-  ForceAll, // Force release every page possible regardless of how long it will
-            // take.
-};
-
-constexpr unsigned char PatternFillByte = 0xAB;
-
-enum FillContentsMode {
-  NoFill = 0,
-  ZeroFill = 1,
-  PatternOrZeroFill = 2 // Pattern fill unless the memory is known to be
-                        // zero-initialized already.
-};
-
-} // namespace scudo
-
-#endif // SCUDO_COMMON_H_
diff --git a/Telegram/ThirdParty/scudo/condition_variable.h b/Telegram/ThirdParty/scudo/condition_variable.h
deleted file mode 100644
index 4afebdc9d..000000000
--- a/Telegram/ThirdParty/scudo/condition_variable.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- condition_variable.h ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CONDITION_VARIABLE_H_
-#define SCUDO_CONDITION_VARIABLE_H_
-
-#include "condition_variable_base.h"
-
-#include "common.h"
-#include "platform.h"
-
-#include "condition_variable_linux.h"
-
-namespace scudo {
-
-// A dummy condition variable implementation. It doesn't do a real `wait`;
-// instead it only spins for a short amount of time.
-class ConditionVariableDummy
-    : public ConditionVariableBase<ConditionVariableDummy> {
-public:
-  void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}
-
-  void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
-    M.unlock();
-
-    constexpr u32 SpinTimes = 64;
-    volatile u32 V = 0;
-    for (u32 I = 0; I < SpinTimes; ++I) {
-      u32 Tmp = V + 1;
-      V = Tmp;
-    }
-
-    M.lock();
-  }
-};
-
-template <typename Config, typename = const bool>
-struct ConditionVariableState {
-  static constexpr bool enabled() { return false; }
-  // This is only used for compilation purposes, so that we don't end up with
-  // many conditional compilations. If you want to use `ConditionVariableDummy`,
-  // define `ConditionVariableT` in your allocator configuration. See
-  // allocator_config.h for more details.
-  using ConditionVariableT = ConditionVariableDummy;
-};
-
-template <typename Config>
-struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
-  static constexpr bool enabled() { return Config::UseConditionVariable; }
-  using ConditionVariableT = typename Config::ConditionVariableT;
-};
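
The two ConditionVariableState definitions implement the detection idiom: the second template parameter defaults to `const bool`, so any Config declaring a `UseConditionVariable` member of that type selects the partial specialization, and every other Config falls back to the primary template. A standalone sketch of the mechanism (the struct names are invented for illustration):

#include <cstdio>

template <typename Config, typename = const bool> struct State {
  static constexpr bool enabled() { return false; }
};
template <typename Config>
struct State<Config, decltype(Config::UseConditionVariable)> {
  static constexpr bool enabled() { return Config::UseConditionVariable; }
};

struct PlainConfig {};
struct CVConfig { static constexpr bool UseConditionVariable = true; };

int main() {
  std::printf("%d %d\n", State<PlainConfig>::enabled(),
              State<CVConfig>::enabled()); // prints "0 1"
  return 0;
}
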
-
-} // namespace scudo
-
-#endif // SCUDO_CONDITION_VARIABLE_H_
diff --git a/Telegram/ThirdParty/scudo/condition_variable_base.h b/Telegram/ThirdParty/scudo/condition_variable_base.h
deleted file mode 100644
index 416c327fe..000000000
--- a/Telegram/ThirdParty/scudo/condition_variable_base.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
-#define SCUDO_CONDITION_VARIABLE_BASE_H_
-
-#include "mutex.h"
-#include "thread_annotations.h"
-
-namespace scudo {
-
-template <typename Derived> class ConditionVariableBase {
-public:
-  constexpr ConditionVariableBase() = default;
-
-  void bindTestOnly(HybridMutex &Mutex) {
-#if SCUDO_DEBUG
-    boundMutex = &Mutex;
-#else
-    (void)Mutex;
-#endif
-  }
-
-  void notifyAll(HybridMutex &M) REQUIRES(M) {
-#if SCUDO_DEBUG
-    CHECK_EQ(&M, boundMutex);
-#endif
-    getDerived()->notifyAllImpl(M);
-  }
-
-  void wait(HybridMutex &M) REQUIRES(M) {
-#if SCUDO_DEBUG
-    CHECK_EQ(&M, boundMutex);
-#endif
-    getDerived()->waitImpl(M);
-  }
-
-protected:
-  Derived *getDerived() { return static_cast<Derived *>(this); }
-
-#if SCUDO_DEBUG
-  // Because thread-safety analysis doesn't support pointer aliasing, we are not
-  // able to mark the proper annotations without false positive. Instead, we
-  // pass the lock and do the same-lock check separately.
-  HybridMutex *boundMutex = nullptr;
-#endif
-};
-
-} // namespace scudo
-
-#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
diff --git a/Telegram/ThirdParty/scudo/condition_variable_linux.cpp b/Telegram/ThirdParty/scudo/condition_variable_linux.cpp
deleted file mode 100644
index e6d9bd177..000000000
--- a/Telegram/ThirdParty/scudo/condition_variable_linux.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_LINUX
-
-#include "condition_variable_linux.h"
-
-#include "atomic_helpers.h"
-
-#include <limits.h>
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-namespace scudo {
-
-void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
-  const u32 V = atomic_load_relaxed(&Counter);
-  atomic_store_relaxed(&Counter, V + 1);
-
-  // TODO(chiahungduan): Move the waiters from the futex waiting queue
-  // `Counter` to the futex waiting queue `M` so that the awoken threads won't
-  // be blocked again because `M` is still held by the current thread.
-  if (LastNotifyAll != V) {
-    syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
-            INT_MAX, nullptr, nullptr, 0);
-  }
-
-  LastNotifyAll = V + 1;
-}
-
-void ConditionVariableLinux::waitImpl(HybridMutex &M) {
-  const u32 V = atomic_load_relaxed(&Counter) + 1;
-  atomic_store_relaxed(&Counter, V);
-
-  // TODO: Use ScopedUnlock when it's supported.
-  M.unlock();
-  syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
-          nullptr, nullptr, 0);
-  M.lock();
-}
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX
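
Reduced to its essentials, the protocol above pairs an atomic counter with FUTEX_WAIT/FUTEX_WAKE; the kernel re-checks the counter value atomically before sleeping, which is what prevents lost wake-ups. A minimal Linux-only sketch (assuming the glibc syscall(2) wrapper; the real implementation additionally juggles the HybridMutex around the wait):

#include <atomic>
#include <climits>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static std::atomic<unsigned> Counter{0};

// Sleep until the counter moves past the value we observed. If it already
// changed between the load and the syscall, FUTEX_WAIT returns immediately.
void waitForBump() {
  const unsigned Seen = Counter.load(std::memory_order_relaxed);
  syscall(SYS_futex, &Counter, FUTEX_WAIT_PRIVATE, Seen, nullptr, nullptr, 0);
}

void bumpAndWakeAll() {
  Counter.fetch_add(1, std::memory_order_relaxed);
  syscall(SYS_futex, &Counter, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0);
}
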
diff --git a/Telegram/ThirdParty/scudo/condition_variable_linux.h b/Telegram/ThirdParty/scudo/condition_variable_linux.h
deleted file mode 100644
index cd0732873..000000000
--- a/Telegram/ThirdParty/scudo/condition_variable_linux.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
-#define SCUDO_CONDITION_VARIABLE_LINUX_H_
-
-#include "platform.h"
-
-#if SCUDO_LINUX
-
-#include "atomic_helpers.h"
-#include "condition_variable_base.h"
-#include "thread_annotations.h"
-
-namespace scudo {
-
-class ConditionVariableLinux
-    : public ConditionVariableBase<ConditionVariableLinux> {
-public:
-  void notifyAllImpl(HybridMutex &M) REQUIRES(M);
-
-  void waitImpl(HybridMutex &M) REQUIRES(M);
-
-private:
-  u32 LastNotifyAll = 0;
-  atomic_u32 Counter = {};
-};
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX
-
-#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
diff --git a/Telegram/ThirdParty/scudo/crc32_hw.cpp b/Telegram/ThirdParty/scudo/crc32_hw.cpp
deleted file mode 100644
index 73f2ae000..000000000
--- a/Telegram/ThirdParty/scudo/crc32_hw.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//===-- crc32_hw.cpp --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "checksum.h"
-
-namespace scudo {
-
-#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-u32 computeHardwareCRC32(u32 Crc, uptr Data) {
-  return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
-}
-#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
-       // defined(__ARM_FEATURE_CRC32)
-
-} // namespace scudo
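
CRC32_INTRINSIC is defined in checksum.h; on an x86-64 target built with -msse4.2 the function above boils down to the hardware CRC32C instruction, roughly as in this standalone sketch (aarch64 builds would use __crc32cd from <arm_acle.h> instead):

#include <cstdint>
#include <nmmintrin.h> // SSE4.2 CRC32C intrinsics

std::uint32_t crc32Word(std::uint32_t Crc, std::uint64_t Data) {
  // A single crc32q instruction folds 8 bytes into the running checksum.
  return static_cast<std::uint32_t>(_mm_crc32_u64(Crc, Data));
}
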
diff --git a/Telegram/ThirdParty/scudo/flags.cpp b/Telegram/ThirdParty/scudo/flags.cpp
deleted file mode 100644
index f498edfbd..000000000
--- a/Telegram/ThirdParty/scudo/flags.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-//===-- flags.cpp -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "flags.h"
-#include "common.h"
-#include "flags_parser.h"
-
-#include "scudo/interface.h"
-
-namespace scudo {
-
-Flags *getFlags() {
-  static Flags F;
-  return &F;
-}
-
-void Flags::setDefaults() {
-#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
-#include "flags.inc"
-#undef SCUDO_FLAG
-
-#ifdef GWP_ASAN_HOOKS
-#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description)                 \
-  GWP_ASAN_##Name = DefaultValue;
-#include "gwp_asan/options.inc"
-#undef GWP_ASAN_OPTION
-#endif // GWP_ASAN_HOOKS
-}
-
-void registerFlags(FlagParser *Parser, Flags *F) {
-#define SCUDO_FLAG(Type, Name, DefaultValue, Description)                      \
-  Parser->registerFlag(#Name, Description, FlagType::FT_##Type,                \
-                       reinterpret_cast<void *>(&F->Name));
-#include "flags.inc"
-#undef SCUDO_FLAG
-
-#ifdef GWP_ASAN_HOOKS
-#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description)                 \
-  Parser->registerFlag("GWP_ASAN_" #Name, Description, FlagType::FT_##Type,    \
-                       reinterpret_cast<void *>(&F->GWP_ASAN_##Name));
-#include "gwp_asan/options.inc"
-#undef GWP_ASAN_OPTION
-#endif // GWP_ASAN_HOOKS
-}
-
-static const char *getCompileDefinitionScudoDefaultOptions() {
-#ifdef SCUDO_DEFAULT_OPTIONS
-  return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
-#else
-  return "";
-#endif
-}
-
-static const char *getScudoDefaultOptions() {
-  return (&__scudo_default_options) ? __scudo_default_options() : "";
-}
-
-void initFlags() {
-  Flags *F = getFlags();
-  F->setDefaults();
-  FlagParser Parser;
-  registerFlags(&Parser, F);
-  Parser.parseString(getCompileDefinitionScudoDefaultOptions());
-  Parser.parseString(getScudoDefaultOptions());
-  Parser.parseString(getEnv("SCUDO_OPTIONS"));
-  if (const char *V = getEnv("SCUDO_ALLOCATION_RING_BUFFER_SIZE")) {
-    Parser.parseStringPair("allocation_ring_buffer_size", V);
-  }
-}
-
-} // namespace scudo
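
initFlags applies its sources in increasing order of priority: the compile-time SCUDO_DEFAULT_OPTIONS definition, then the weak __scudo_default_options() hook, then the SCUDO_OPTIONS environment variable. A sketch of overriding defaults at link time through the hook (the flag values are illustrative):

// Strongly define the weak hook declared in scudo/interface.h. Scudo parses
// this string at initialization; SCUDO_OPTIONS can still override it.
extern "C" const char *__scudo_default_options(void) {
  return "dealloc_type_mismatch=true:release_to_os_interval_ms=2000";
}
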
diff --git a/Telegram/ThirdParty/scudo/flags.h b/Telegram/ThirdParty/scudo/flags.h
deleted file mode 100644
index 2cd0a5b13..000000000
--- a/Telegram/ThirdParty/scudo/flags.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- flags.h -------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_FLAGS_H_
-#define SCUDO_FLAGS_H_
-
-#include "internal_defs.h"
-
-namespace scudo {
-
-struct Flags {
-#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
-#include "flags.inc"
-#undef SCUDO_FLAG
-
-#ifdef GWP_ASAN_HOOKS
-#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description)                 \
-  Type GWP_ASAN_##Name;
-#include "gwp_asan/options.inc"
-#undef GWP_ASAN_OPTION
-#endif // GWP_ASAN_HOOKS
-
-  void setDefaults();
-};
-
-Flags *getFlags();
-void initFlags();
-class FlagParser;
-void registerFlags(FlagParser *Parser, Flags *F);
-
-} // namespace scudo
-
-#endif // SCUDO_FLAGS_H_
diff --git a/Telegram/ThirdParty/scudo/flags.inc b/Telegram/ThirdParty/scudo/flags.inc
deleted file mode 100644
index f5a2bab50..000000000
--- a/Telegram/ThirdParty/scudo/flags.inc
+++ /dev/null
@@ -1,51 +0,0 @@
-//===-- flags.inc -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_FLAG
-#error "Define SCUDO_FLAG prior to including this file!"
-#endif
-
-SCUDO_FLAG(int, quarantine_size_kb, 0,
-           "Size (in kilobytes) of quarantine used to delay the actual "
-           "deallocation of chunks. Lower value may reduce memory usage but "
-           "decrease the effectiveness of the mitigation.")
-
-SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
-           "Size (in kilobytes) of per-thread cache used to offload the global "
-           "quarantine. Lower value may reduce memory usage but might increase "
-           "the contention on the global quarantine.")
-
-SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
-           "Size (in bytes) up to which chunks will be quarantined; chunks "
-           "larger than this bypass the quarantine.")
-
-SCUDO_FLAG(bool, dealloc_type_mismatch, false,
-           "Terminate on a type mismatch in allocation-deallocation functions, "
-           "eg: malloc/delete, new/free, new/delete[], etc.")
-
-SCUDO_FLAG(bool, delete_size_mismatch, true,
-           "Terminate on a size mismatch between a sized-delete and the actual "
-           "size of a chunk (as provided to new/new[]).")
-
-SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
-
-SCUDO_FLAG(bool, pattern_fill_contents, false,
-           "Pattern fill chunk contents on allocation.")
-
-SCUDO_FLAG(bool, may_return_null, true,
-           "Indicate whether the allocator should return NULL instead of "
-           "terminating in otherwise non-fatal error scenarios, eg: OOM, "
-           "invalid allocation alignments, etc.")
-
-SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
-           "Interval (in milliseconds) at which to attempt release of unused "
-           "memory to the OS. Negative values disable the feature.")
-
-SCUDO_FLAG(int, allocation_ring_buffer_size, 32768,
-           "Entries to keep in the allocation ring buffer for scudo. "
-           "Values less than or equal to zero disable the buffer.")
diff --git a/Telegram/ThirdParty/scudo/flags_parser.cpp b/Telegram/ThirdParty/scudo/flags_parser.cpp
deleted file mode 100644
index 3d8c6f378..000000000
--- a/Telegram/ThirdParty/scudo/flags_parser.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-//===-- flags_parser.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "flags_parser.h"
-#include "common.h"
-#include "report.h"
-
-#include <errno.h>
-#include <limits.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace scudo {
-
-class UnknownFlagsRegistry {
-  static const u32 MaxUnknownFlags = 16;
-  const char *UnknownFlagsNames[MaxUnknownFlags];
-  u32 NumberOfUnknownFlags;
-
-public:
-  void add(const char *Name) {
-    CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
-    UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
-  }
-
-  void report() {
-    if (!NumberOfUnknownFlags)
-      return;
-    Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
-           NumberOfUnknownFlags);
-    for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
-      Printf("    %s\n", UnknownFlagsNames[I]);
-    NumberOfUnknownFlags = 0;
-  }
-};
-static UnknownFlagsRegistry UnknownFlags;
-
-void reportUnrecognizedFlags() { UnknownFlags.report(); }
-
-void FlagParser::printFlagDescriptions() {
-  Printf("Available flags for Scudo:\n");
-  for (u32 I = 0; I < NumberOfFlags; ++I)
-    Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
-}
-
-static bool isSeparator(char C) {
-  return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
-         C == '\r';
-}
-
-static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
-
-void FlagParser::skipWhitespace() {
-  while (isSeparator(Buffer[Pos]))
-    ++Pos;
-}
-
-void FlagParser::parseFlag() {
-  const uptr NameStart = Pos;
-  while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
-    ++Pos;
-  if (Buffer[Pos] != '=')
-    reportError("expected '='");
-  const char *Name = Buffer + NameStart;
-  const uptr ValueStart = ++Pos;
-  const char *Value;
-  if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
-    const char Quote = Buffer[Pos++];
-    while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
-      ++Pos;
-    if (Buffer[Pos] == 0)
-      reportError("unterminated string");
-    Value = Buffer + ValueStart + 1;
-    ++Pos; // consume the closing quote
-  } else {
-    while (!isSeparatorOrNull(Buffer[Pos]))
-      ++Pos;
-    Value = Buffer + ValueStart;
-  }
-  if (!runHandler(Name, Value, '='))
-    reportError("flag parsing failed.");
-}
-
-void FlagParser::parseFlags() {
-  while (true) {
-    skipWhitespace();
-    if (Buffer[Pos] == 0)
-      break;
-    parseFlag();
-  }
-}
-
-void FlagParser::parseString(const char *S) {
-  if (!S)
-    return;
-  // Backup current parser state to allow nested parseString() calls.
-  const char *OldBuffer = Buffer;
-  const uptr OldPos = Pos;
-  Buffer = S;
-  Pos = 0;
-
-  parseFlags();
-
-  Buffer = OldBuffer;
-  Pos = OldPos;
-}
-
-inline bool parseBool(const char *Value, bool *b) {
-  if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
-      strncmp(Value, "false", 5) == 0) {
-    *b = false;
-    return true;
-  }
-  if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
-      strncmp(Value, "true", 4) == 0) {
-    *b = true;
-    return true;
-  }
-  return false;
-}
-
-void FlagParser::parseStringPair(const char *Name, const char *Value) {
-  if (!runHandler(Name, Value, '\0'))
-    reportError("flag parsing failed.");
-}
-
-bool FlagParser::runHandler(const char *Name, const char *Value,
-                            const char Sep) {
-  for (u32 I = 0; I < NumberOfFlags; ++I) {
-    const uptr Len = strlen(Flags[I].Name);
-    if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != Sep)
-      continue;
-    bool Ok = false;
-    switch (Flags[I].Type) {
-    case FlagType::FT_bool:
-      Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
-      if (!Ok)
-        reportInvalidFlag("bool", Value);
-      break;
-    case FlagType::FT_int:
-      char *ValueEnd;
-      errno = 0;
-      long V = strtol(Value, &ValueEnd, 10);
-      if (errno != 0 ||                 // strtol failed (over or underflow)
-          V > INT_MAX || V < INT_MIN || // overflows integer
-          // contains unexpected characters
-          (*ValueEnd != '"' && *ValueEnd != '\'' &&
-           !isSeparatorOrNull(*ValueEnd))) {
-        reportInvalidFlag("int", Value);
-        break;
-      }
-      *reinterpret_cast<int *>(Flags[I].Var) = static_cast<int>(V);
-      Ok = true;
-      break;
-    }
-    return Ok;
-  }
-  // Unrecognized flag. This is not a fatal error; we may print a warning later.
-  UnknownFlags.add(Name);
-  return true;
-}
-
-void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
-                              void *Var) {
-  CHECK_LT(NumberOfFlags, MaxFlags);
-  Flags[NumberOfFlags].Name = Name;
-  Flags[NumberOfFlags].Desc = Desc;
-  Flags[NumberOfFlags].Type = Type;
-  Flags[NumberOfFlags].Var = Var;
-  ++NumberOfFlags;
-}
-
-} // namespace scudo
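
Since isSeparator treats spaces, commas, colons, newlines, tabs and carriage returns interchangeably, the following option strings all parse identically (the values are illustrative):

    SCUDO_OPTIONS="quarantine_size_kb=64:may_return_null=1"
    SCUDO_OPTIONS="quarantine_size_kb=64,may_return_null=1"
    SCUDO_OPTIONS="quarantine_size_kb=64 may_return_null=1"
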
diff --git a/Telegram/ThirdParty/scudo/flags_parser.h b/Telegram/ThirdParty/scudo/flags_parser.h
deleted file mode 100644
index ded496fda..000000000
--- a/Telegram/ThirdParty/scudo/flags_parser.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_FLAGS_PARSER_H_
-#define SCUDO_FLAGS_PARSER_H_
-
-#include "report.h"
-#include "string_utils.h"
-
-#include <stddef.h>
-
-namespace scudo {
-
-enum class FlagType : u8 {
-  FT_bool,
-  FT_int,
-};
-
-class FlagParser {
-public:
-  void registerFlag(const char *Name, const char *Desc, FlagType Type,
-                    void *Var);
-  void parseString(const char *S);
-  void printFlagDescriptions();
-  void parseStringPair(const char *Name, const char *Value);
-
-private:
-  static const u32 MaxFlags = 20;
-  struct Flag {
-    const char *Name;
-    const char *Desc;
-    FlagType Type;
-    void *Var;
-  } Flags[MaxFlags];
-
-  u32 NumberOfFlags = 0;
-  const char *Buffer = nullptr;
-  uptr Pos = 0;
-
-  void reportFatalError(const char *Error);
-  void skipWhitespace();
-  void parseFlags();
-  void parseFlag();
-  bool runHandler(const char *Name, const char *Value, char Sep);
-};
-
-void reportUnrecognizedFlags();
-
-} // namespace scudo
-
-#endif // SCUDO_FLAGS_PARSER_H_
diff --git a/Telegram/ThirdParty/scudo/fuchsia.cpp b/Telegram/ThirdParty/scudo/fuchsia.cpp
deleted file mode 100644
index 0788c4198..000000000
--- a/Telegram/ThirdParty/scudo/fuchsia.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_FUCHSIA
-
-#include "common.h"
-#include "mutex.h"
-#include "string_utils.h"
-
-#include <lib/sync/mutex.h> // for sync_mutex_t
-#include <stdlib.h>         // for getenv()
-#include <zircon/compiler.h>
-#include <zircon/process.h>
-#include <zircon/sanitizer.h>
-#include <zircon/status.h>
-#include <zircon/syscalls.h>
-
-namespace scudo {
-
-uptr getPageSize() { return _zx_system_get_page_size(); }
-
-void NORETURN die() { __builtin_trap(); }
-
-// We zero-initialize the Data parameter of map(); make sure this is
-// consistent with ZX_HANDLE_INVALID.
-static_assert(ZX_HANDLE_INVALID == 0, "");
-
-static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
-                                uptr Size) {
-  char Error[128];
-  formatString(Error, sizeof(Error),
-               "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
-               Size >> 10, zx_status_get_string(Status));
-  outputRaw(Error);
-  die();
-}
-
-static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
-  // Only scenario so far.
-  DCHECK(Data);
-  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
-
-  const zx_status_t Status = _zx_vmar_allocate(
-      _zx_vmar_root_self(),
-      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
-      Size, &Data->Vmar, &Data->VmarBase);
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmar_allocate", Size);
-    return nullptr;
-  }
-  return reinterpret_cast<void *>(Data->VmarBase);
-}
-
-void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
-          MapPlatformData *Data) {
-  DCHECK_EQ(Size % getPageSizeCached(), 0);
-  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
-
-  // For MAP_NOACCESS, just allocate a Vmar and return.
-  if (Flags & MAP_NOACCESS)
-    return allocateVmar(Size, Data, AllowNoMem);
-
-  const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
-                               ? Data->Vmar
-                               : _zx_vmar_root_self();
-
-  zx_status_t Status;
-  zx_handle_t Vmo;
-  uint64_t VmoSize = 0;
-  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
-    // If a Vmo was specified, it's a resize operation.
-    CHECK(Addr);
-    DCHECK(Flags & MAP_RESIZABLE);
-    Vmo = Data->Vmo;
-    VmoSize = Data->VmoSize;
-    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
-    if (Status != ZX_OK) {
-      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-        dieOnError(Status, "zx_vmo_set_size", VmoSize + Size);
-      return nullptr;
-    }
-  } else {
-    // Otherwise, create a Vmo and set its name.
-    Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
-    if (UNLIKELY(Status != ZX_OK)) {
-      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-        dieOnError(Status, "zx_vmo_create", Size);
-      return nullptr;
-    }
-    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
-  }
-
-  uintptr_t P;
-  zx_vm_option_t MapFlags =
-      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
-  if (Addr)
-    DCHECK(Data);
-  const uint64_t Offset =
-      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
-  if (Offset)
-    MapFlags |= ZX_VM_SPECIFIC;
-  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmar_map", Size);
-    return nullptr;
-  }
-
-  if (Flags & MAP_PRECOMMIT) {
-    Status = _zx_vmar_op_range(Vmar, ZX_VMAR_OP_COMMIT, P, Size,
-                               /*buffer=*/nullptr, /*buffer_size=*/0);
-  }
-
-  // No need to track the Vmo if we don't intend to resize it. Close it.
-  if (Flags & MAP_RESIZABLE) {
-    DCHECK(Data);
-    if (Data->Vmo == ZX_HANDLE_INVALID)
-      Data->Vmo = Vmo;
-    else
-      DCHECK_EQ(Data->Vmo, Vmo);
-  } else {
-    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
-  }
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmar_op_range", Size);
-    return nullptr;
-  }
-
-  if (Data)
-    Data->VmoSize += Size;
-
-  return reinterpret_cast<void *>(P);
-}
-
-void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
-  if (Flags & UNMAP_ALL) {
-    DCHECK_NE(Data, nullptr);
-    const zx_handle_t Vmar = Data->Vmar;
-    DCHECK_NE(Vmar, _zx_vmar_root_self());
-    // Destroying the vmar effectively unmaps the whole mapping.
-    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
-    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
-  } else {
-    const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
-                                 ? Data->Vmar
-                                 : _zx_vmar_root_self();
-    const zx_status_t Status =
-        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
-    if (UNLIKELY(Status != ZX_OK))
-      dieOnError(Status, "zx_vmar_unmap", Size);
-  }
-  if (Data) {
-    if (Data->Vmo != ZX_HANDLE_INVALID)
-      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
-    memset(Data, 0, sizeof(*Data));
-  }
-}
-
-void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
-                         UNUSED MapPlatformData *Data) {
-  const zx_vm_option_t Prot =
-      (Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
-  DCHECK(Data);
-  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
-  const zx_status_t Status = _zx_vmar_protect(Data->Vmar, Prot, Addr, Size);
-  if (Status != ZX_OK)
-    dieOnError(Status, "zx_vmar_protect", Size);
-}
-
-void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
-                      MapPlatformData *Data) {
-  // TODO: DCHECK the BaseAddress is consistent with the data in
-  // MapPlatformData.
-  DCHECK(Data);
-  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
-  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
-  const zx_status_t Status =
-      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
-  CHECK_EQ(Status, ZX_OK);
-}
-
-const char *getEnv(const char *Name) { return getenv(Name); }
-
-// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
-// because the Fuchsia implementation of sync_mutex_t has clang thread safety
-// annotations. Were we to apply proper capability annotations to the top level
-// HybridMutex class itself, they would not be needed. As it stands, the
-// thread analysis thinks that we are locking the mutex and accidentally leaving
-// it locked on the way out.
-bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  // Size and alignment must be compatible between both types.
-  return sync_mutex_trylock(&M) == ZX_OK;
-}
-
-void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  sync_mutex_lock(&M);
-}
-
-void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  sync_mutex_unlock(&M);
-}
-
-void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}
-
-u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
-u64 getMonotonicTimeFast() { return _zx_clock_get_monotonic(); }
-
-u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
-
-u32 getThreadID() { return 0; }
-
-bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
-  static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
-  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
-    return false;
-  _zx_cprng_draw(Buffer, Length);
-  return true;
-}
-
-void outputRaw(const char *Buffer) {
-  __sanitizer_log_write(Buffer, strlen(Buffer));
-}
-
-void setAbortMessage(const char *Message) {}
-
-} // namespace scudo
-
-#endif // SCUDO_FUCHSIA
diff --git a/Telegram/ThirdParty/scudo/fuchsia.h b/Telegram/ThirdParty/scudo/fuchsia.h
deleted file mode 100644
index c1dfd7638..000000000
--- a/Telegram/ThirdParty/scudo/fuchsia.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_FUCHSIA_H_
-#define SCUDO_FUCHSIA_H_
-
-#include "platform.h"
-
-#if SCUDO_FUCHSIA
-
-#include <stdint.h>
-#include <zircon/types.h>
-
-namespace scudo {
-
-struct MapPlatformData {
-  zx_handle_t Vmar;
-  zx_handle_t Vmo;
-  uintptr_t VmarBase;
-  uint64_t VmoSize;
-};
-
-} // namespace scudo
-
-#endif // SCUDO_FUCHSIA
-
-#endif // SCUDO_FUCHSIA_H_
diff --git a/Telegram/ThirdParty/scudo/fuzz/CMakeLists.txt b/Telegram/ThirdParty/scudo/fuzz/CMakeLists.txt
deleted file mode 100644
index d29c2f2fe..000000000
--- a/Telegram/ThirdParty/scudo/fuzz/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-if (LLVM_USE_SANITIZE_COVERAGE)
-  add_executable(get_error_info_fuzzer
-      get_error_info_fuzzer.cpp)
-  set_target_properties(
-      get_error_info_fuzzer PROPERTIES FOLDER "Fuzzers")
-  target_compile_options(
-      get_error_info_fuzzer PRIVATE -fsanitize=fuzzer)
-  set_target_properties(
-      get_error_info_fuzzer PROPERTIES LINK_FLAGS -fsanitize=fuzzer)
-  target_include_directories(
-      get_error_info_fuzzer PRIVATE .. ../include)
-endif()
diff --git a/Telegram/ThirdParty/scudo/fuzz/get_error_info_fuzzer.cpp b/Telegram/ThirdParty/scudo/fuzz/get_error_info_fuzzer.cpp
deleted file mode 100644
index 5b01ebe11..000000000
--- a/Telegram/ThirdParty/scudo/fuzz/get_error_info_fuzzer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- get_error_info_fuzzer.cpp -----------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#define SCUDO_FUZZ
-#include "allocator_config.h"
-#include "combined.h"
-
-#include <fuzzer/FuzzedDataProvider.h>
-
-#include <string>
-#include <vector>
-
-extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
-  using AllocatorT = scudo::Allocator<scudo::AndroidConfig>;
-  FuzzedDataProvider FDP(Data, Size);
-
-  uintptr_t FaultAddr = FDP.ConsumeIntegral<uintptr_t>();
-  uintptr_t MemoryAddr = FDP.ConsumeIntegral<uintptr_t>();
-
-  std::string MemoryAndTags =
-      FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
-  const char *Memory = MemoryAndTags.c_str();
-  // Assume 16-byte tag granules: every 17 input bytes provide 16 bytes of
-  // memory plus one tag byte.
-  size_t MemorySize = (MemoryAndTags.length() / 17) * 16;
-  const char *MemoryTags = Memory + MemorySize;
-
-  std::string StackDepotBytes =
-      FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
-  std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
-  for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
-       ++i) {
-    StackDepot[i] = StackDepotBytes[i];
-  }
-
-  std::string RegionInfoBytes =
-      FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
-  std::vector<char> RegionInfo(AllocatorT::getRegionInfoArraySize(), 0);
-  for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size();
-       ++i) {
-    RegionInfo[i] = RegionInfoBytes[i];
-  }
-
-  std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
-
-  scudo_error_info ErrorInfo;
-  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
-                           RegionInfo.data(), RingBufferBytes.data(),
-                           RingBufferBytes.size(), Memory, MemoryTags,
-                           MemoryAddr, MemorySize);
-  return 0;
-}
diff --git a/Telegram/ThirdParty/scudo/include/scudo/interface.h b/Telegram/ThirdParty/scudo/include/scudo/interface.h
deleted file mode 100644
index a2dedea91..000000000
--- a/Telegram/ThirdParty/scudo/include/scudo/interface.h
+++ /dev/null
@@ -1,182 +0,0 @@
-//===-- scudo/interface.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_INTERFACE_H_
-#define SCUDO_INTERFACE_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-extern "C" {
-
-__attribute__((weak)) const char *__scudo_default_options(void);
-
-// Post-allocation & pre-deallocation hooks.
-__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size);
-__attribute__((weak)) void __scudo_deallocate_hook(void *ptr);
-
-// `realloc` involves both a deallocation and an allocation, but the two are
-// not reported atomically. A tool that takes a snapshot between the two
-// reports may confuse the user by showing the memory from `realloc` as
-// missing. To alleviate that, the two `realloc` hooks below let a hooks user
-// know that the allocation and deallocation belong to one bundled call. These
-// hooks are optional and should only be used to track reallocs more closely.
-//
-// See more details in the comment of `realloc` in wrappers_c.inc.
-__attribute__((weak)) void
-__scudo_realloc_allocate_hook(void *old_ptr, void *new_ptr, size_t size);
-__attribute__((weak)) void __scudo_realloc_deallocate_hook(void *old_ptr);
-
-void __scudo_print_stats(void);
-
-typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
-
-// Determine the likely cause of a tag check fault or other memory protection
-// error on a system with memory tagging support. The results are returned via
-// the error_info data structure. Up to three possible causes are returned in
-// the reports array, in decreasing order of probability. The remaining elements
-// of reports are zero-initialized.
-//
-// This function may be called from a different process from the one that
-// crashed. In this case, various data structures must be copied from the
-// crashing process to the process that analyzes the crash.
-//
-// This interface is not guaranteed to be stable and may change at any time.
-// Furthermore, the version of scudo in the crashing process must be the same as
-// the version in the process that analyzes the crash.
-//
-// fault_addr is the fault address. On aarch64 this is available in the system
-// register FAR_ELx, or siginfo.si_addr in Linux 5.11 or above. This address
-// must include the pointer tag; this is available if SA_EXPOSE_TAGBITS was set
-// in sigaction.sa_flags when the signal handler was registered. Note that the
-// kernel strips the tag from the field sigcontext.fault_address, so this
-// address is not suitable to be passed as fault_addr.
-//
-// stack_depot is a pointer to the stack depot data structure, which may be
-// obtained by calling the function __scudo_get_stack_depot_addr() in the
-// crashing process. The size of the stack depot is available by calling the
-// function __scudo_get_stack_depot_size().
-//
-// region_info is a pointer to the region info data structure, which may be
-// obtained by calling the function __scudo_get_region_info_addr() in the
-// crashing process. The size of the region info is available by calling the
-// function __scudo_get_region_info_size().
-//
-// memory is a pointer to a region of memory surrounding the fault address.
-// The more memory available via this pointer, the more likely it is that the
-// function will be able to analyze a crash correctly. It is recommended to
-// provide an amount of memory equal to 16 * the primary allocator's largest
-// size class on either side of the fault address.
-//
-// memory_tags is a pointer to an array of memory tags for the memory accessed
-// via memory. Each byte of this array corresponds to a region of memory of size
-// equal to the architecturally defined memory tag granule size (16 on aarch64).
-//
-// memory_addr is the start address of memory in the crashing process's address
-// space.
-//
-// memory_size is the size of the memory region referred to by the memory
-// pointer.
-void __scudo_get_error_info(struct scudo_error_info *error_info,
-                            uintptr_t fault_addr, const char *stack_depot,
-                            size_t stack_depot_size, const char *region_info,
-                            const char *ring_buffer, size_t ring_buffer_size,
-                            const char *memory, const char *memory_tags,
-                            uintptr_t memory_addr, size_t memory_size);
-
-enum scudo_error_type {
-  UNKNOWN,
-  USE_AFTER_FREE,
-  BUFFER_OVERFLOW,
-  BUFFER_UNDERFLOW,
-};
-
-struct scudo_error_report {
-  enum scudo_error_type error_type;
-
-  uintptr_t allocation_address;
-  uintptr_t allocation_size;
-
-  uint32_t allocation_tid;
-  uintptr_t allocation_trace[64];
-
-  uint32_t deallocation_tid;
-  uintptr_t deallocation_trace[64];
-};
-
-struct scudo_error_info {
-  struct scudo_error_report reports[3];
-};
-
-const char *__scudo_get_stack_depot_addr(void);
-size_t __scudo_get_stack_depot_size(void);
-
-const char *__scudo_get_region_info_addr(void);
-size_t __scudo_get_region_info_size(void);
-
-const char *__scudo_get_ring_buffer_addr(void);
-size_t __scudo_get_ring_buffer_size(void);
-
-#ifndef M_DECAY_TIME
-#define M_DECAY_TIME -100
-#endif
-
-#ifndef M_PURGE
-#define M_PURGE -101
-#endif
-
-#ifndef M_PURGE_ALL
-#define M_PURGE_ALL -104
-#endif
-
-// Tune the allocator's choice of memory tags to make it more likely that
-// a certain class of memory errors will be detected. The value argument should
-// be one of the M_MEMTAG_TUNING_* constants below.
-#ifndef M_MEMTAG_TUNING
-#define M_MEMTAG_TUNING -102
-#endif
-
-// Per-thread memory initialization tuning. The value argument should be one of:
-// 1: Disable automatic heap initialization and, where possible, memory tagging,
-//    on this thread.
-// 0: Normal behavior.
-#ifndef M_THREAD_DISABLE_MEM_INIT
-#define M_THREAD_DISABLE_MEM_INIT -103
-#endif
-
-#ifndef M_CACHE_COUNT_MAX
-#define M_CACHE_COUNT_MAX -200
-#endif
-
-#ifndef M_CACHE_SIZE_MAX
-#define M_CACHE_SIZE_MAX -201
-#endif
-
-#ifndef M_TSDS_COUNT_MAX
-#define M_TSDS_COUNT_MAX -202
-#endif
-
-// Tune for buffer overflows.
-#ifndef M_MEMTAG_TUNING_BUFFER_OVERFLOW
-#define M_MEMTAG_TUNING_BUFFER_OVERFLOW 0
-#endif
-
-// Tune for use-after-free.
-#ifndef M_MEMTAG_TUNING_UAF
-#define M_MEMTAG_TUNING_UAF 1
-#endif
-
-// Print internal stats to the log.
-#ifndef M_LOG_STATS
-#define M_LOG_STATS -205
-#endif
-
-} // extern "C"
-
-#endif // SCUDO_INTERFACE_H_
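
On platforms whose libc routes mallopt(3) to Scudo's setOption (Android's Bionic does this), the M_* constants above serve as mallopt parameters. A hedged usage sketch, assuming such a libc:

#include <malloc.h>

void tuneScudo() {
  mallopt(M_DECAY_TIME, 1);                      // enable delayed page release
  mallopt(M_MEMTAG_TUNING, M_MEMTAG_TUNING_UAF); // bias tagging toward UAF
  mallopt(M_PURGE, 0);                           // release free memory now
}
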
diff --git a/Telegram/ThirdParty/scudo/internal_defs.h b/Telegram/ThirdParty/scudo/internal_defs.h
deleted file mode 100644
index 27c6b451f..000000000
--- a/Telegram/ThirdParty/scudo/internal_defs.h
+++ /dev/null
@@ -1,166 +0,0 @@
-//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_INTERNAL_DEFS_H_
-#define SCUDO_INTERNAL_DEFS_H_
-
-#include "platform.h"
-
-#include <stdint.h>
-
-#ifndef SCUDO_DEBUG
-#define SCUDO_DEBUG 0
-#endif
-
-#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
-
-// String related macros.
-
-#define STRINGIFY_(S) #S
-#define STRINGIFY(S) STRINGIFY_(S)
-#define CONCATENATE_(S, C) S##C
-#define CONCATENATE(S, C) CONCATENATE_(S, C)
-
-// Attributes & builtins related macros.
-
-#define INTERFACE __attribute__((visibility("default")))
-#define HIDDEN __attribute__((visibility("hidden")))
-#define WEAK __attribute__((weak))
-#define ALWAYS_INLINE inline __attribute__((always_inline))
-#define ALIAS(X) __attribute__((alias(X)))
-#define FORMAT(F, A) __attribute__((format(printf, F, A)))
-#define NOINLINE __attribute__((noinline))
-#define NORETURN __attribute__((noreturn))
-#define LIKELY(X) __builtin_expect(!!(X), 1)
-#define UNLIKELY(X) __builtin_expect(!!(X), 0)
-#if defined(__i386__) || defined(__x86_64__)
-// __builtin_prefetch(X) generates prefetcht0 on x86; we want the non-temporal
-// prefetchnta instead.
-#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
-#else
-#define PREFETCH(X) __builtin_prefetch(X)
-#endif
-#define UNUSED __attribute__((unused))
-#define USED __attribute__((used))
-#define NOEXCEPT noexcept
-
-// This check is only available on Clang. This is essentially an alias of
-// C++20's 'constinit' specifier which will take care of this when (if?) we can
-// ask all libc's that use Scudo to compile us with C++20. Dynamic
-// initialization is bad; Scudo is designed to be lazy-initialized on the
-// first call to malloc/free (and friends), and this generally happens in the
-// loader somewhere in libdl's init. After the loader is done, control is
-// transferred to libc's initialization, and the dynamic initializers are run.
-// If there's a dynamic initializer for Scudo, then it will clobber the
-// already-initialized Scudo, and re-initialize all its members back to default
-// values, causing various explosions. Unfortunately, marking
-// scudo::Allocator<>'s constructor as 'constexpr' isn't sufficient to prevent
-// dynamic initialization, as default initialization is fine under 'constexpr'
-// (but not 'constinit'). Clang at -O0, and gcc at all opt levels will emit a
-// dynamic initializer for any constant-initialized variables if there is a mix
-// of default-initialized and constant-initialized variables.
-//
-// If you're looking at this because your build failed, you probably introduced
-// a new member to scudo::Allocator<> (possibly transiently) that didn't have an
-// initializer. The fix is easy - just add one.
-#if defined(__has_attribute)
-#if __has_attribute(require_constant_initialization)
-#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION                                  \
-  __attribute__((__require_constant_initialization__))
-#else
-#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-#endif
-#endif
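
A standalone illustration of what the attribute enforces (Clang-only; the struct names are invented for this example):

// Compiles: an NSDMI of a literal type permits constant initialization.
struct Ok { int X = 0; };
__attribute__((__require_constant_initialization__)) static Ok OkInstance;

// Would be rejected at compile time instead of silently emitting a dynamic
// initializer:
//   struct Bad { Bad(); int X; };  // constructor defined in another TU
//   __attribute__((__require_constant_initialization__)) static Bad B;
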
-
-namespace scudo {
-
-typedef uintptr_t uptr;
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-typedef intptr_t sptr;
-typedef int8_t s8;
-typedef int16_t s16;
-typedef int32_t s32;
-typedef int64_t s64;
-
-// The following two functions have platform specific implementations.
-void outputRaw(const char *Buffer);
-void NORETURN die();
-
-#define RAW_CHECK_MSG(Expr, Msg)                                               \
-  do {                                                                         \
-    if (UNLIKELY(!(Expr))) {                                                   \
-      outputRaw(Msg);                                                          \
-      die();                                                                   \
-    }                                                                          \
-  } while (false)
-
-#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
-
-void NORETURN reportCheckFailed(const char *File, int Line,
-                                const char *Condition, u64 Value1, u64 Value2);
-#define CHECK_IMPL(C1, Op, C2)                                                 \
-  do {                                                                         \
-    if (UNLIKELY(!(C1 Op C2))) {                                               \
-      scudo::reportCheckFailed(__FILE__, __LINE__, #C1 " " #Op " " #C2,        \
-                               (scudo::u64)C1, (scudo::u64)C2);                \
-      scudo::die();                                                            \
-    }                                                                          \
-  } while (false)
-
-#define CHECK(A) CHECK_IMPL((A), !=, 0)
-#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
-#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
-#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
-#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
-#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
-#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))
-
-#if SCUDO_DEBUG
-#define DCHECK(A) CHECK(A)
-#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
-#define DCHECK_NE(A, B) CHECK_NE(A, B)
-#define DCHECK_LT(A, B) CHECK_LT(A, B)
-#define DCHECK_LE(A, B) CHECK_LE(A, B)
-#define DCHECK_GT(A, B) CHECK_GT(A, B)
-#define DCHECK_GE(A, B) CHECK_GE(A, B)
-#else
-#define DCHECK(A)                                                              \
-  do {                                                                         \
-  } while (false && (A))
-#define DCHECK_EQ(A, B)                                                        \
-  do {                                                                         \
-  } while (false && (A) == (B))
-#define DCHECK_NE(A, B)                                                        \
-  do {                                                                         \
-  } while (false && (A) != (B))
-#define DCHECK_LT(A, B)                                                        \
-  do {                                                                         \
-  } while (false && (A) < (B))
-#define DCHECK_LE(A, B)                                                        \
-  do {                                                                         \
-  } while (false && (A) <= (B))
-#define DCHECK_GT(A, B)                                                        \
-  do {                                                                         \
-  } while (false && (A) > (B))
-#define DCHECK_GE(A, B)                                                        \
-  do {                                                                         \
-  } while (false && (A) >= (B))
-#endif
-
-// The superfluous die() call effectively makes this macro NORETURN.
-#define UNREACHABLE(Msg)                                                       \
-  do {                                                                         \
-    CHECK(0 && Msg);                                                           \
-    die();                                                                     \
-  } while (0)
-
-} // namespace scudo
-
-#endif // SCUDO_INTERNAL_DEFS_H_
diff --git a/Telegram/ThirdParty/scudo/linux.cpp b/Telegram/ThirdParty/scudo/linux.cpp
deleted file mode 100644
index 274695108..000000000
--- a/Telegram/ThirdParty/scudo/linux.cpp
+++ /dev/null
@@ -1,242 +0,0 @@
-//===-- linux.cpp -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_LINUX
-
-#include "common.h"
-#include "internal_defs.h"
-#include "linux.h"
-#include "mutex.h"
-#include "report_linux.h"
-#include "string_utils.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/futex.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-
-#if SCUDO_ANDROID
-#include <sys/prctl.h>
-// Definitions of prctl arguments to set a vma name in Android kernels.
-#define ANDROID_PR_SET_VMA 0x53564d41
-#define ANDROID_PR_SET_VMA_ANON_NAME 0
-#endif
-
-namespace scudo {
-
-uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
-
-void NORETURN die() { abort(); }
-
-// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
-void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
-          UNUSED MapPlatformData *Data) {
-  int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
-  int MmapProt;
-  if (Flags & MAP_NOACCESS) {
-    MmapFlags |= MAP_NORESERVE;
-    MmapProt = PROT_NONE;
-  } else {
-    MmapProt = PROT_READ | PROT_WRITE;
-  }
-#if defined(__aarch64__)
-#ifndef PROT_MTE
-#define PROT_MTE 0x20
-#endif
-  if (Flags & MAP_MEMTAG)
-    MmapProt |= PROT_MTE;
-#endif
-  if (Addr)
-    MmapFlags |= MAP_FIXED;
-  void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
-  if (P == MAP_FAILED) {
-    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
-      reportMapError(errno == ENOMEM ? Size : 0);
-    return nullptr;
-  }
-#if SCUDO_ANDROID
-  if (Name)
-    prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
-#endif
-  return P;
-}
-
-// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
-void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
-           UNUSED MapPlatformData *Data) {
-  if (munmap(Addr, Size) != 0)
-    reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
-}
-
-// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
-void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
-                         UNUSED MapPlatformData *Data) {
-  int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
-  if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
-    reportProtectError(Addr, Size, Prot);
-}
-
-// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
-void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
-                      UNUSED MapPlatformData *Data) {
-  void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
-
-  while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
-  }
-}
-
-// Calling getenv should be fine (c)(tm) at any time.
-const char *getEnv(const char *Name) { return getenv(Name); }
-
-namespace {
-enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
-}
-
-bool HybridMutex::tryLock() {
-  return atomic_compare_exchange_strong(&M, Unlocked, Locked,
-                                        memory_order_acquire) == Unlocked;
-}
-
-// The following is based on https://akkadia.org/drepper/futex.pdf.
-void HybridMutex::lockSlow() {
-  u32 V = atomic_compare_exchange_strong(&M, Unlocked, Locked,
-                                         memory_order_acquire);
-  if (V == Unlocked)
-    return;
-  if (V != Sleeping)
-    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
-  while (V != Unlocked) {
-    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
-            nullptr, nullptr, 0);
-    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
-  }
-}
-
-void HybridMutex::unlock() {
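-  // fetch_sub leaves the state Unlocked only if it was Locked, i.e. there
-  // were no waiters; if it was Sleeping, the subtraction leaves Locked
-  // behind, so store Unlocked explicitly and wake one sleeper.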
-  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
-    atomic_store(&M, Unlocked, memory_order_release);
-    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
-            nullptr, nullptr, 0);
-  }
-}
-
-void HybridMutex::assertHeldImpl() {
-  CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
-}
-
-u64 getMonotonicTime() {
-  timespec TS;
-  clock_gettime(CLOCK_MONOTONIC, &TS);
-  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
-         static_cast<u64>(TS.tv_nsec);
-}
-
-u64 getMonotonicTimeFast() {
-#if defined(CLOCK_MONOTONIC_COARSE)
-  timespec TS;
-  clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
-  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
-         static_cast<u64>(TS.tv_nsec);
-#else
-  return getMonotonicTime();
-#endif
-}
-
-u32 getNumberOfCPUs() {
-  cpu_set_t CPUs;
-  // sched_getaffinity can fail for a variety of legitimate reasons (lack of
-  // CAP_SYS_NICE, syscall filtering, etc), in which case we shall return 0.
-  if (sched_getaffinity(0, sizeof(cpu_set_t), &CPUs) != 0)
-    return 0;
-  return static_cast<u32>(CPU_COUNT(&CPUs));
-}
-
-u32 getThreadID() {
-#if SCUDO_ANDROID
-  return static_cast<u32>(gettid());
-#else
-  return static_cast<u32>(syscall(SYS_gettid));
-#endif
-}
-
-// Blocking is possibly unused if the getrandom block is not compiled in.
-bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
-  if (!Buffer || !Length || Length > MaxRandomLength)
-    return false;
-  ssize_t ReadBytes;
-#if defined(SYS_getrandom)
-#if !defined(GRND_NONBLOCK)
-#define GRND_NONBLOCK 1
-#endif
-  // Up to 256 bytes, getrandom will not be interrupted.
-  ReadBytes =
-      syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
-  if (ReadBytes == static_cast<ssize_t>(Length))
-    return true;
-#endif // defined(SYS_getrandom)
-  // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
-  // Blocking is moot here; O_NONBLOCK has no effect when opening /dev/urandom.
-  const int FileDesc = open("/dev/urandom", O_RDONLY);
-  if (FileDesc == -1)
-    return false;
-  ReadBytes = read(FileDesc, Buffer, Length);
-  close(FileDesc);
-  return (ReadBytes == static_cast<ssize_t>(Length));
-}
-
-// Allocation free syslog-like API.
-extern "C" WEAK int async_safe_write_log(int pri, const char *tag,
-                                         const char *msg);
-
-void outputRaw(const char *Buffer) {
-  if (&async_safe_write_log) {
-    constexpr s32 AndroidLogInfo = 4;
-    constexpr uptr MaxLength = 1024U;
-    char LocalBuffer[MaxLength];
-    while (strlen(Buffer) > MaxLength) {
-      uptr P;
-      for (P = MaxLength - 1; P > 0; P--) {
-        if (Buffer[P] == '\n') {
-          memcpy(LocalBuffer, Buffer, P);
-          LocalBuffer[P] = '\0';
-          async_safe_write_log(AndroidLogInfo, "scudo", LocalBuffer);
-          Buffer = &Buffer[P + 1];
-          break;
-        }
-      }
-      // If no newline was found, just log the buffer.
-      if (P == 0)
-        break;
-    }
-    async_safe_write_log(AndroidLogInfo, "scudo", Buffer);
-  } else {
-    (void)write(2, Buffer, strlen(Buffer));
-  }
-}
-
-extern "C" WEAK void android_set_abort_message(const char *);
-
-void setAbortMessage(const char *Message) {
-  if (&android_set_abort_message)
-    android_set_abort_message(Message);
-}
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX
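
The `lockSlow()`/`unlock()` pair deleted above is the classic three-state futex mutex from Drepper's "Futexes Are Tricky". A minimal standalone sketch of the same protocol, using `std::atomic` plus the raw futex syscall (the class and member names here are illustrative, not Scudo's):

```cpp
#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

class FutexMutex {
  enum : unsigned { Unlocked = 0, Locked = 1, Sleeping = 2 };
  std::atomic<unsigned> M{Unlocked};

  long futex(int Op, unsigned Val) {
    // The kernel only needs the address; casting the atomic is the usual
    // (if formally murky) idiom for futex users.
    return syscall(SYS_futex, reinterpret_cast<unsigned *>(&M), Op, Val,
                   nullptr, nullptr, 0);
  }

public:
  void lock() {
    unsigned V = Unlocked;
    // Fast path: uncontended acquisition.
    if (M.compare_exchange_strong(V, Locked, std::memory_order_acquire))
      return;
    // Slow path: advertise a (possible) sleeper, then wait until the
    // exchange observes Unlocked, which transfers ownership to us.
    if (V != Sleeping)
      V = M.exchange(Sleeping, std::memory_order_acquire);
    while (V != Unlocked) {
      futex(FUTEX_WAIT_PRIVATE, Sleeping);
      V = M.exchange(Sleeping, std::memory_order_acquire);
    }
  }

  void unlock() {
    // fetch_sub returns the previous state: if it was Sleeping (2) rather
    // than Locked (1), someone may be waiting, so reset and wake one.
    if (M.fetch_sub(1, std::memory_order_release) != Locked) {
      M.store(Unlocked, std::memory_order_release);
      futex(FUTEX_WAKE_PRIVATE, 1);
    }
  }
};
```

The payoff is that an uncontended lock/unlock round trip never enters the kernel; the futex syscalls only fire once a waiter has set the state to Sleeping.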
diff --git a/Telegram/ThirdParty/scudo/linux.h b/Telegram/ThirdParty/scudo/linux.h
deleted file mode 100644
index 72acb6da8..000000000
--- a/Telegram/ThirdParty/scudo/linux.h
+++ /dev/null
@@ -1,25 +0,0 @@
-//===-- linux.h -------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_LINUX_H_
-#define SCUDO_LINUX_H_
-
-#include "platform.h"
-
-#if SCUDO_LINUX
-
-namespace scudo {
-
-// MapPlatformData is unused on Linux; define it as a minimally sized structure.
-struct MapPlatformData {};
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX
-
-#endif // SCUDO_LINUX_H_
diff --git a/Telegram/ThirdParty/scudo/list.h b/Telegram/ThirdParty/scudo/list.h
deleted file mode 100644
index 0137667d1..000000000
--- a/Telegram/ThirdParty/scudo/list.h
+++ /dev/null
@@ -1,240 +0,0 @@
-//===-- list.h --------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_LIST_H_
-#define SCUDO_LIST_H_
-
-#include "internal_defs.h"
-
-namespace scudo {
-
-// Intrusive POD singly- and doubly-linked lists.
-// An object with all zero fields should represent a valid empty list. clear()
-// should be called on all non-zero-initialized objects before use.
-
-template <class T> class IteratorBase {
-public:
-  explicit IteratorBase(T *CurrentT) : Current(CurrentT) {}
-  IteratorBase &operator++() {
-    Current = Current->Next;
-    return *this;
-  }
-  bool operator!=(IteratorBase Other) const { return Current != Other.Current; }
-  T &operator*() { return *Current; }
-
-private:
-  T *Current;
-};
-
-template <class T> struct IntrusiveList {
-  bool empty() const { return Size == 0; }
-  uptr size() const { return Size; }
-
-  T *front() { return First; }
-  const T *front() const { return First; }
-  T *back() { return Last; }
-  const T *back() const { return Last; }
-
-  void clear() {
-    First = Last = nullptr;
-    Size = 0;
-  }
-
-  typedef IteratorBase<T> Iterator;
-  typedef IteratorBase<const T> ConstIterator;
-
-  Iterator begin() { return Iterator(First); }
-  Iterator end() { return Iterator(nullptr); }
-
-  ConstIterator begin() const { return ConstIterator(First); }
-  ConstIterator end() const { return ConstIterator(nullptr); }
-
-  void checkConsistency() const;
-
-protected:
-  uptr Size = 0;
-  T *First = nullptr;
-  T *Last = nullptr;
-};
-
-template <class T> void IntrusiveList<T>::checkConsistency() const {
-  if (Size == 0) {
-    CHECK_EQ(First, nullptr);
-    CHECK_EQ(Last, nullptr);
-  } else {
-    uptr Count = 0;
-    for (T *I = First;; I = I->Next) {
-      Count++;
-      if (I == Last)
-        break;
-    }
-    CHECK_EQ(this->size(), Count);
-    CHECK_EQ(Last->Next, nullptr);
-  }
-}
-
-template <class T> struct SinglyLinkedList : public IntrusiveList<T> {
-  using IntrusiveList<T>::First;
-  using IntrusiveList<T>::Last;
-  using IntrusiveList<T>::Size;
-  using IntrusiveList<T>::empty;
-
-  void push_back(T *X) {
-    X->Next = nullptr;
-    if (empty())
-      First = X;
-    else
-      Last->Next = X;
-    Last = X;
-    Size++;
-  }
-
-  void push_front(T *X) {
-    if (empty())
-      Last = X;
-    X->Next = First;
-    First = X;
-    Size++;
-  }
-
-  void pop_front() {
-    DCHECK(!empty());
-    First = First->Next;
-    if (!First)
-      Last = nullptr;
-    Size--;
-  }
-
-  // Inserts X right after Prev.
-  void insert(T *Prev, T *X) {
-    DCHECK(!empty());
-    DCHECK_NE(Prev, nullptr);
-    DCHECK_NE(X, nullptr);
-    X->Next = Prev->Next;
-    Prev->Next = X;
-    if (Last == Prev)
-      Last = X;
-    ++Size;
-  }
-
-  void extract(T *Prev, T *X) {
-    DCHECK(!empty());
-    DCHECK_NE(Prev, nullptr);
-    DCHECK_NE(X, nullptr);
-    DCHECK_EQ(Prev->Next, X);
-    Prev->Next = X->Next;
-    if (Last == X)
-      Last = Prev;
-    Size--;
-  }
-
-  void append_back(SinglyLinkedList<T> *L) {
-    DCHECK_NE(this, L);
-    if (L->empty())
-      return;
-    if (empty()) {
-      *this = *L;
-    } else {
-      Last->Next = L->First;
-      Last = L->Last;
-      Size += L->size();
-    }
-    L->clear();
-  }
-};
-
-template <class T> struct DoublyLinkedList : IntrusiveList<T> {
-  using IntrusiveList<T>::First;
-  using IntrusiveList<T>::Last;
-  using IntrusiveList<T>::Size;
-  using IntrusiveList<T>::empty;
-
-  void push_front(T *X) {
-    X->Prev = nullptr;
-    if (empty()) {
-      Last = X;
-    } else {
-      DCHECK_EQ(First->Prev, nullptr);
-      First->Prev = X;
-    }
-    X->Next = First;
-    First = X;
-    Size++;
-  }
-
-  // Inserts X before Y.
-  void insert(T *X, T *Y) {
-    if (Y == First)
-      return push_front(X);
-    T *Prev = Y->Prev;
-    // This is a hard CHECK to ensure consistency in the event of an intentional
-    // corruption of Y->Prev, to prevent a potential write-{4,8}.
-    CHECK_EQ(Prev->Next, Y);
-    Prev->Next = X;
-    X->Prev = Prev;
-    X->Next = Y;
-    Y->Prev = X;
-    Size++;
-  }
-
-  void push_back(T *X) {
-    X->Next = nullptr;
-    if (empty()) {
-      First = X;
-    } else {
-      DCHECK_EQ(Last->Next, nullptr);
-      Last->Next = X;
-    }
-    X->Prev = Last;
-    Last = X;
-    Size++;
-  }
-
-  void pop_front() {
-    DCHECK(!empty());
-    First = First->Next;
-    if (!First)
-      Last = nullptr;
-    else
-      First->Prev = nullptr;
-    Size--;
-  }
-
-  // The consistency of the adjacent links is aggressively checked in order to
-  // catch potential corruption attempts that could yield a mirrored
-  // write-{4,8} primitive. nullptr checks are deemed less vital.
-  void remove(T *X) {
-    T *Prev = X->Prev;
-    T *Next = X->Next;
-    if (Prev) {
-      CHECK_EQ(Prev->Next, X);
-      Prev->Next = Next;
-    }
-    if (Next) {
-      CHECK_EQ(Next->Prev, X);
-      Next->Prev = Prev;
-    }
-    if (First == X) {
-      DCHECK_EQ(Prev, nullptr);
-      First = Next;
-    } else {
-      DCHECK_NE(Prev, nullptr);
-    }
-    if (Last == X) {
-      DCHECK_EQ(Next, nullptr);
-      Last = Prev;
-    } else {
-      DCHECK_NE(Next, nullptr);
-    }
-    Size--;
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_LIST_H_
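
For context on the intrusive contract of the deleted lists: the element type itself carries the link fields (`Next`, plus `Prev` for the doubly-linked variant), so the containers never allocate. A hypothetical usage sketch against the removed header (`Node` is an example type, not part of Scudo):

```cpp
#include "list.h" // the deleted header above

struct Node {
  Node *Next = nullptr; // required by SinglyLinkedList<Node>
  int Value = 0;
};

void example() {
  scudo::SinglyLinkedList<Node> L;
  L.clear(); // redundant for a default-constructed list, required for
             // raw zero-filled storage per the header comment
  static Node A{nullptr, 1}, B{nullptr, 2};
  L.push_back(&A);
  L.push_front(&B); // list is now B -> A
  for (Node &N : L) // IteratorBase provides ++, != and *
    (void)N.Value;
  L.pop_front();
}
```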
diff --git a/Telegram/ThirdParty/scudo/local_cache.h b/Telegram/ThirdParty/scudo/local_cache.h
deleted file mode 100644
index 46d6affdc..000000000
--- a/Telegram/ThirdParty/scudo/local_cache.h
+++ /dev/null
@@ -1,189 +0,0 @@
-//===-- local_cache.h -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_LOCAL_CACHE_H_
-#define SCUDO_LOCAL_CACHE_H_
-
-#include "internal_defs.h"
-#include "list.h"
-#include "platform.h"
-#include "report.h"
-#include "stats.h"
-#include "string_utils.h"
-
-namespace scudo {
-
-template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
-  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
-  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
-
-  void init(GlobalStats *S, SizeClassAllocator *A) {
-    DCHECK(isEmpty());
-    Stats.init();
-    if (LIKELY(S))
-      S->link(&Stats);
-    Allocator = A;
-    initCache();
-  }
-
-  void destroy(GlobalStats *S) {
-    drain();
-    if (LIKELY(S))
-      S->unlink(&Stats);
-  }
-
-  void *allocate(uptr ClassId) {
-    DCHECK_LT(ClassId, NumClasses);
-    PerClass *C = &PerClassArray[ClassId];
-    if (C->Count == 0) {
-      // Refill with half of the maximum number of cached blocks.
-      DCHECK_GT(C->MaxCount / 2, 0U);
-      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
-        return nullptr;
-      DCHECK_GT(C->Count, 0);
-    }
-    // We read ClassSize before accessing Chunks because it's adjacent to
-    // Count, while Chunks might be further off (depending on Count). That keeps
-    // the memory accesses in close quarters.
-    const uptr ClassSize = C->ClassSize;
-    CompactPtrT CompactP = C->Chunks[--C->Count];
-    Stats.add(StatAllocated, ClassSize);
-    Stats.sub(StatFree, ClassSize);
-    return Allocator->decompactPtr(ClassId, CompactP);
-  }
-
-  bool deallocate(uptr ClassId, void *P) {
-    CHECK_LT(ClassId, NumClasses);
-    PerClass *C = &PerClassArray[ClassId];
-
-    // If the cache is full, drain half of blocks back to the main allocator.
-    const bool NeedToDrainCache = C->Count == C->MaxCount;
-    if (NeedToDrainCache)
-      drain(C, ClassId);
-    // See comment in allocate() about memory accesses.
-    const uptr ClassSize = C->ClassSize;
-    C->Chunks[C->Count++] =
-        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
-    Stats.sub(StatAllocated, ClassSize);
-    Stats.add(StatFree, ClassSize);
-
-    return NeedToDrainCache;
-  }
-
-  bool isEmpty() const {
-    for (uptr I = 0; I < NumClasses; ++I)
-      if (PerClassArray[I].Count)
-        return false;
-    return true;
-  }
-
-  void drain() {
-    // Drain BatchClassId last as it may be needed while draining normal blocks.
-    for (uptr I = 0; I < NumClasses; ++I) {
-      if (I == BatchClassId)
-        continue;
-      while (PerClassArray[I].Count > 0)
-        drain(&PerClassArray[I], I);
-    }
-    while (PerClassArray[BatchClassId].Count > 0)
-      drain(&PerClassArray[BatchClassId], BatchClassId);
-    DCHECK(isEmpty());
-  }
-
-  void *getBatchClassBlock() {
-    void *B = allocate(BatchClassId);
-    if (UNLIKELY(!B))
-      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
-    return B;
-  }
-
-  LocalStats &getStats() { return Stats; }
-
-  void getStats(ScopedString *Str) {
-    bool EmptyCache = true;
-    for (uptr I = 0; I < NumClasses; ++I) {
-      if (PerClassArray[I].Count == 0)
-        continue;
-
-      EmptyCache = false;
-      // The size of BatchClass is set to 0 intentionally. See the comment in
-      // initCache() for more details.
-      const uptr ClassSize = I == BatchClassId
-                                 ? SizeClassAllocator::getSizeByClassId(I)
-                                 : PerClassArray[I].ClassSize;
-      // Note that the string utils don't support printing u16, thus we cast it
-      // to the commonly used type uptr.
-      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
-                  static_cast<uptr>(PerClassArray[I].Count),
-                  static_cast<uptr>(PerClassArray[I].MaxCount));
-    }
-
-    if (EmptyCache)
-      Str->append("    No block is cached.\n");
-  }
-
-  static u16 getMaxCached(uptr Size) {
-    return Min(SizeClassMap::MaxNumCachedHint,
-               SizeClassMap::getMaxCachedHint(Size));
-  }
-
-private:
-  static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr BatchClassId = SizeClassMap::BatchClassId;
-  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
-    u16 Count;
-    u16 MaxCount;
-    // Note: ClassSize is zero for the transfer batch.
-    uptr ClassSize;
-    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
-  };
-  PerClass PerClassArray[NumClasses] = {};
-  LocalStats Stats;
-  SizeClassAllocator *Allocator = nullptr;
-
-  NOINLINE void initCache() {
-    for (uptr I = 0; I < NumClasses; I++) {
-      PerClass *P = &PerClassArray[I];
-      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
-      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
-      if (I != BatchClassId) {
-        P->ClassSize = Size;
-      } else {
-        // ClassSize in this struct is only used for malloc/free stats, which
-        // should only track user allocations, not internal movements.
-        P->ClassSize = 0;
-      }
-    }
-  }
-
-  void destroyBatch(uptr ClassId, void *B) {
-    if (ClassId != BatchClassId)
-      deallocate(BatchClassId, B);
-  }
-
-  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
-    const u16 NumBlocksRefilled =
-        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
-    DCHECK_LE(NumBlocksRefilled, MaxRefill);
-    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
-    return NumBlocksRefilled != 0;
-  }
-
-  NOINLINE void drain(PerClass *C, uptr ClassId) {
-    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
-    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
-    // u16 will be promoted to int by arithmetic type conversion.
-    C->Count = static_cast<u16>(C->Count - Count);
-    for (u16 I = 0; I < C->Count; I++)
-      C->Chunks[I] = C->Chunks[I + Count];
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_LOCAL_CACHE_H_
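
Note how the cache's policies bracket `MaxCount / 2`: `allocate()` refills half of the maximum when a class runs empty, and `deallocate()` drains half when it runs full. This hysteresis keeps a malloc/free pattern hovering at either boundary from hitting the primary allocator on every call. A standalone toy of the drain step (mirroring `drain()` above, not Scudo code):

```cpp
#include <cstdint>
#include <vector>

struct ToyCache {
  uint16_t Count = 0;
  uint16_t MaxCount = 8;
  uint32_t Chunks[16] = {};

  // Push the oldest MaxCount/2 entries to `Spill` (standing in for
  // Allocator->pushBlocks) and shift the survivors to the front.
  void drainHalf(std::vector<uint32_t> &Spill) {
    const uint16_t N =
        static_cast<uint16_t>(Count < MaxCount / 2 ? Count : MaxCount / 2);
    Spill.insert(Spill.end(), Chunks, Chunks + N);
    Count = static_cast<uint16_t>(Count - N);
    for (uint16_t I = 0; I < Count; I++)
      Chunks[I] = Chunks[I + N];
  }
};
```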
diff --git a/Telegram/ThirdParty/scudo/mem_map.cpp b/Telegram/ThirdParty/scudo/mem_map.cpp
deleted file mode 100644
index 115cc34e7..000000000
--- a/Telegram/ThirdParty/scudo/mem_map.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-//===-- mem_map.cpp ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "mem_map.h"
-
-#include "common.h"
-
-namespace scudo {
-
-bool MemMapDefault::mapImpl(uptr Addr, uptr Size, const char *Name,
-                            uptr Flags) {
-  void *MappedAddr =
-      ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
-  if (MappedAddr == nullptr)
-    return false;
-  Base = reinterpret_cast<uptr>(MappedAddr);
-  MappedBase = Base;
-  Capacity = Size;
-  return true;
-}
-
-void MemMapDefault::unmapImpl(uptr Addr, uptr Size) {
-  if (Size == Capacity) {
-    Base = MappedBase = Capacity = 0;
-  } else {
-    if (Base == Addr) {
-      Base = Addr + Size;
-      MappedBase = MappedBase == 0 ? Base : Max(MappedBase, Base);
-    }
-    Capacity -= Size;
-  }
-
-  ::scudo::unmap(reinterpret_cast<void *>(Addr), Size, UNMAP_ALL, &Data);
-}
-
-bool MemMapDefault::remapImpl(uptr Addr, uptr Size, const char *Name,
-                              uptr Flags) {
-  void *RemappedPtr =
-      ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
-  const uptr RemappedAddr = reinterpret_cast<uptr>(RemappedPtr);
-  MappedBase = MappedBase == 0 ? RemappedAddr : Min(MappedBase, RemappedAddr);
-  return RemappedAddr == Addr;
-}
-
-void MemMapDefault::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
-  DCHECK_NE(MappedBase, 0U);
-  DCHECK_GE(From, MappedBase);
-  return ::scudo::releasePagesToOS(MappedBase, From - MappedBase, Size, &Data);
-}
-
-void MemMapDefault::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
-  return ::scudo::setMemoryPermission(Addr, Size, Flags);
-}
-
-void ReservedMemoryDefault::releaseImpl() {
-  ::scudo::unmap(reinterpret_cast<void *>(Base), Capacity, UNMAP_ALL, &Data);
-}
-
-bool ReservedMemoryDefault::createImpl(uptr Addr, uptr Size, const char *Name,
-                                       uptr Flags) {
-  void *Reserved = ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name,
-                                Flags | MAP_NOACCESS, &Data);
-  if (Reserved == nullptr)
-    return false;
-
-  Base = reinterpret_cast<uptr>(Reserved);
-  Capacity = Size;
-
-  return true;
-}
-
-ReservedMemoryDefault::MemMapT ReservedMemoryDefault::dispatchImpl(uptr Addr,
-                                                                   uptr Size) {
-  ReservedMemoryDefault::MemMapT NewMap(Addr, Size);
-  NewMap.setMapPlatformData(Data);
-  return NewMap;
-}
-
-} // namespace scudo
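
The unmap bookkeeping above relies on the invariant (asserted in `MemMapBase::unmap()`) that a mapping only ever shrinks from one of its two ends. A standalone toy of that window arithmetic:

```cpp
#include <cassert>
#include <cstdint>

struct Window {
  uint64_t Base = 0, Capacity = 0;

  void unmap(uint64_t Addr, uint64_t Size) {
    // Only the front or the back of [Base, Base + Capacity) may go away.
    assert(Addr == Base || Addr + Size == Base + Capacity);
    if (Size == Capacity) {
      Base = Capacity = 0; // fully unmapped: mark invalid
    } else {
      if (Base == Addr)
        Base = Addr + Size; // shrank from the front
      Capacity -= Size;     // shrinking from the back only trims Capacity
    }
  }
};
```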
diff --git a/Telegram/ThirdParty/scudo/mem_map.h b/Telegram/ThirdParty/scudo/mem_map.h
deleted file mode 100644
index b92216cf2..000000000
--- a/Telegram/ThirdParty/scudo/mem_map.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//===-- mem_map.h -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_MEM_MAP_H_
-#define SCUDO_MEM_MAP_H_
-
-#include "mem_map_base.h"
-
-#include "common.h"
-#include "internal_defs.h"
-
-// TODO: This is only used for `MapPlatformData`. Remove these includes when we
-// have all three platform specific `MemMap` and `ReservedMemory`
-// implementations.
-#include "fuchsia.h"
-#include "linux.h"
-#include "trusty.h"
-
-#include "mem_map_fuchsia.h"
-#include "mem_map_linux.h"
-
-namespace scudo {
-
-// This will be deprecated once every allocator is supported by each
-// platform's `MemMap` implementation.
-class MemMapDefault final : public MemMapBase<MemMapDefault> {
-public:
-  constexpr MemMapDefault() = default;
-  MemMapDefault(uptr Base, uptr Capacity) : Base(Base), Capacity(Capacity) {}
-
-  // Impls for base functions.
-  bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void unmapImpl(uptr Addr, uptr Size);
-  bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
-  void releasePagesToOSImpl(uptr From, uptr Size) {
-    return releaseAndZeroPagesToOSImpl(From, Size);
-  }
-  void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
-  uptr getBaseImpl() { return Base; }
-  uptr getCapacityImpl() { return Capacity; }
-
-  void setMapPlatformData(MapPlatformData &NewData) { Data = NewData; }
-
-private:
-  uptr Base = 0;
-  uptr Capacity = 0;
-  uptr MappedBase = 0;
-  MapPlatformData Data = {};
-};
-
-// This will be deprecated once every allocator is supported by each
-// platform's `MemMap` implementation.
-class ReservedMemoryDefault final
-    : public ReservedMemory<ReservedMemoryDefault, MemMapDefault> {
-public:
-  constexpr ReservedMemoryDefault() = default;
-
-  bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void releaseImpl();
-  MemMapT dispatchImpl(uptr Addr, uptr Size);
-  uptr getBaseImpl() { return Base; }
-  uptr getCapacityImpl() { return Capacity; }
-
-private:
-  uptr Base = 0;
-  uptr Capacity = 0;
-  MapPlatformData Data = {};
-};
-
-#if SCUDO_LINUX
-using ReservedMemoryT = ReservedMemoryLinux;
-using MemMapT = ReservedMemoryT::MemMapT;
-#elif SCUDO_FUCHSIA
-using ReservedMemoryT = ReservedMemoryFuchsia;
-using MemMapT = ReservedMemoryT::MemMapT;
-#elif SCUDO_TRUSTY
-using ReservedMemoryT = ReservedMemoryDefault;
-using MemMapT = ReservedMemoryT::MemMapT;
-#else
-#error                                                                         \
-    "Unsupported platform, please implement the ReservedMemory for your platform!"
-#endif
-
-} // namespace scudo
-
-#endif // SCUDO_MEM_MAP_H_
diff --git a/Telegram/ThirdParty/scudo/mem_map_base.h b/Telegram/ThirdParty/scudo/mem_map_base.h
deleted file mode 100644
index 99ab0cba6..000000000
--- a/Telegram/ThirdParty/scudo/mem_map_base.h
+++ /dev/null
@@ -1,129 +0,0 @@
-//===-- mem_map_base.h ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_MEM_MAP_BASE_H_
-#define SCUDO_MEM_MAP_BASE_H_
-
-#include "common.h"
-
-namespace scudo {
-
-// In Scudo, every memory operation will be fulfilled through a
-// platform-specific `MemMap` instance. The essential APIs are listed in the
-// `MemMapBase` below. This is implemented in CRTP, so for each implementation,
-// it has to implement all of the 'Impl' named functions.
-template <class Derived> class MemMapBase {
-public:
-  constexpr MemMapBase() = default;
-
-  // This is used to map a new set of contiguous pages. Note that the `Addr` is
-  // only a suggestion to the system.
-  bool map(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
-    DCHECK(!isAllocated());
-    return invokeImpl(&Derived::mapImpl, Addr, Size, Name, Flags);
-  }
-
-  // This is used to unmap partial/full pages from the beginning or the end.
-  // I.e., the result pages are expected to be still contiguous.
-  void unmap(uptr Addr, uptr Size) {
-    DCHECK(isAllocated());
-    DCHECK((Addr == getBase()) || (Addr + Size == getBase() + getCapacity()));
-    invokeImpl(&Derived::unmapImpl, Addr, Size);
-  }
-
-  // This is used to remap a mapped range (either from map() or dispatched from
-  // ReservedMemory). For example, we have reserved several pages and then we
-  // want to remap them with different accessibility.
-  bool remap(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
-    DCHECK(isAllocated());
-    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
-    return invokeImpl(&Derived::remapImpl, Addr, Size, Name, Flags);
-  }
-
-  // This is used to update the pages' access permission. For example, mark
-  // pages as no read/write permission.
-  void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
-    DCHECK(isAllocated());
-    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
-    return invokeImpl(&Derived::setMemoryPermissionImpl, Addr, Size, Flags);
-  }
-
-  // Suggest releasing a set of contiguous physical pages back to the OS. Note
-  // that only physical pages are supposed to be released. Any release of
-  // virtual pages may lead to undefined behavior.
-  void releasePagesToOS(uptr From, uptr Size) {
-    DCHECK(isAllocated());
-    DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
-    invokeImpl(&Derived::releasePagesToOSImpl, From, Size);
-  }
-  // This is similar to the above one except that any subsequent access to the
-  // released pages will return with zero-filled pages.
-  void releaseAndZeroPagesToOS(uptr From, uptr Size) {
-    DCHECK(isAllocated());
-    DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
-    invokeImpl(&Derived::releaseAndZeroPagesToOSImpl, From, Size);
-  }
-
-  uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
-  uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
-
-  bool isAllocated() { return getBase() != 0U; }
-
-protected:
-  template <typename R, typename... Args>
-  R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
-    return (static_cast<Derived *>(this)->*MemFn)(args...);
-  }
-};
-
-// `ReservedMemory` is a special memory handle which can be viewed as a page
-// allocator. `ReservedMemory` reserves a contiguous range of pages; later
-// page requests can be fulfilled at designated addresses. This is used when
-// we want to ensure the virtual address of the MemMap will be in a known range.
-// This is implemented in CRTP, so for each
-// implementation, it has to implement all of the 'Impl' named functions.
-template <class Derived, typename MemMapTy> class ReservedMemory {
-public:
-  using MemMapT = MemMapTy;
-  constexpr ReservedMemory() = default;
-
-  // Reserve a chunk of memory at a suggested address.
-  bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
-    DCHECK(!isCreated());
-    return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags);
-  }
-
-  // Release the entire reserved memory.
-  void release() {
-    DCHECK(isCreated());
-    invokeImpl(&Derived::releaseImpl);
-  }
-
-  // Dispatch a sub-range of reserved memory. Note that any fragmentation of
-  // the reserved pages is managed by each implementation.
-  MemMapT dispatch(uptr Addr, uptr Size) {
-    DCHECK(isCreated());
-    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
-    return invokeImpl(&Derived::dispatchImpl, Addr, Size);
-  }
-
-  uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
-  uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
-
-  bool isCreated() { return getBase() != 0U; }
-
-protected:
-  template <typename R, typename... Args>
-  R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
-    return (static_cast<Derived *>(this)->*MemFn)(args...);
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_MEM_MAP_BASE_H_
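
The `invokeImpl()` helper above is plain CRTP: the base class casts `this` to the derived type and calls the `*Impl` member directly, so dispatch is resolved at compile time with no vtable. A minimal sketch of the pattern with illustrative names:

```cpp
#include <cstdint>

template <class Derived> class Base {
public:
  bool map(uintptr_t Addr, uintptr_t Size) {
    // Static dispatch: resolved at compile time, freely inlinable.
    return static_cast<Derived *>(this)->mapImpl(Addr, Size);
  }
};

class LinuxImpl final : public Base<LinuxImpl> {
public:
  bool mapImpl(uintptr_t, uintptr_t) { return true; } // platform code here
};

// No virtual functions means no vtable pointer in the object.
static_assert(sizeof(LinuxImpl) == 1, "empty class, no hidden state");
```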
diff --git a/Telegram/ThirdParty/scudo/mem_map_fuchsia.cpp b/Telegram/ThirdParty/scudo/mem_map_fuchsia.cpp
deleted file mode 100644
index 0566ab065..000000000
--- a/Telegram/ThirdParty/scudo/mem_map_fuchsia.cpp
+++ /dev/null
@@ -1,252 +0,0 @@
-//===-- mem_map_fuchsia.cpp -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "mem_map_fuchsia.h"
-
-#include "atomic_helpers.h"
-#include "common.h"
-#include "string_utils.h"
-
-#if SCUDO_FUCHSIA
-
-#include <zircon/process.h>
-#include <zircon/status.h>
-#include <zircon/syscalls.h>
-
-namespace scudo {
-
-static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
-                                uptr Size) {
-  char Error[128];
-  formatString(Error, sizeof(Error),
-               "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
-               Size >> 10, _zx_status_get_string(Status));
-  outputRaw(Error);
-  die();
-}
-
-static void setVmoName(zx_handle_t Vmo, const char *Name) {
-  size_t Len = strlen(Name);
-  DCHECK_LT(Len, ZX_MAX_NAME_LEN);
-  zx_status_t Status = _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, Len);
-  CHECK_EQ(Status, ZX_OK);
-}
-
-// Returns the (cached) base address of the root VMAR.
-static uptr getRootVmarBase() {
-  static atomic_uptr CachedResult = {0};
-
-  uptr Result = atomic_load(&CachedResult, memory_order_acquire);
-  if (UNLIKELY(!Result)) {
-    zx_info_vmar_t VmarInfo;
-    zx_status_t Status =
-        _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &VmarInfo,
-                            sizeof(VmarInfo), nullptr, nullptr);
-    CHECK_EQ(Status, ZX_OK);
-    CHECK_NE(VmarInfo.base, 0);
-
-    atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
-    Result = VmarInfo.base;
-  }
-
-  return Result;
-}
-
-// Lazily creates and then always returns the same zero-sized VMO.
-static zx_handle_t getPlaceholderVmo() {
-  static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
-
-  zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
-  if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
-    // Create a zero-sized placeholder VMO.
-    zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
-    if (UNLIKELY(Status != ZX_OK))
-      dieOnError(Status, "zx_vmo_create", 0);
-
-    setVmoName(Vmo, "scudo:reserved");
-
-    // Atomically store its handle. If some other thread wins the race, use its
-    // handle and discard ours.
-    zx_handle_t OldValue = atomic_compare_exchange_strong(
-        &StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
-    if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
-      Status = _zx_handle_close(Vmo);
-      CHECK_EQ(Status, ZX_OK);
-
-      Vmo = OldValue;
-    }
-  }
-
-  return Vmo;
-}
-
-MemMapFuchsia::MemMapFuchsia(uptr Base, uptr Capacity)
-    : MapAddr(Base), WindowBase(Base), WindowSize(Capacity) {
-  // Create the VMO.
-  zx_status_t Status = _zx_vmo_create(Capacity, 0, &Vmo);
-  if (UNLIKELY(Status != ZX_OK))
-    dieOnError(Status, "zx_vmo_create", Capacity);
-}
-
-bool MemMapFuchsia::mapImpl(UNUSED uptr Addr, uptr Size, const char *Name,
-                            uptr Flags) {
-  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
-  const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
-  const bool NoAccess = !!(Flags & MAP_NOACCESS);
-
-  // Create the VMO.
-  zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmo_create", Size);
-    return false;
-  }
-
-  if (Name != nullptr)
-    setVmoName(Vmo, Name);
-
-  // Map it.
-  zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS;
-  if (!NoAccess)
-    MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
-  Status =
-      _zx_vmar_map(_zx_vmar_root_self(), MapFlags, 0, Vmo, 0, Size, &MapAddr);
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmar_map", Size);
-
-    Status = _zx_handle_close(Vmo);
-    CHECK_EQ(Status, ZX_OK);
-
-    MapAddr = 0;
-    Vmo = ZX_HANDLE_INVALID;
-    return false;
-  }
-
-  if (PreCommit) {
-    Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
-                               Size, nullptr, 0);
-    CHECK_EQ(Status, ZX_OK);
-  }
-
-  WindowBase = MapAddr;
-  WindowSize = Size;
-  return true;
-}
-
-void MemMapFuchsia::unmapImpl(uptr Addr, uptr Size) {
-  zx_status_t Status;
-
-  if (Size == WindowSize) {
-    // NOTE: Closing first and then unmapping seems slightly faster than doing
-    // the same operations in the opposite order.
-    Status = _zx_handle_close(Vmo);
-    CHECK_EQ(Status, ZX_OK);
-    Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
-    CHECK_EQ(Status, ZX_OK);
-
-    MapAddr = WindowBase = WindowSize = 0;
-    Vmo = ZX_HANDLE_INVALID;
-  } else {
-    // Unmap the subrange.
-    Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
-    CHECK_EQ(Status, ZX_OK);
-
-    // Decommit the pages that we just unmapped.
-    Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, Addr - MapAddr, Size,
-                              nullptr, 0);
-    CHECK_EQ(Status, ZX_OK);
-
-    if (Addr == WindowBase)
-      WindowBase += Size;
-    WindowSize -= Size;
-  }
-}
-
-bool MemMapFuchsia::remapImpl(uptr Addr, uptr Size, const char *Name,
-                              uptr Flags) {
-  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
-  const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
-  const bool NoAccess = !!(Flags & MAP_NOACCESS);
-
-  // NOTE: This will rename the *whole* VMO, not only the requested portion of
-  // it. But we cannot do better than this given the MemMap API. In practice,
-  // the upper layers of Scudo always pass the same Name for a given MemMap.
-  if (Name != nullptr)
-    setVmoName(Vmo, Name);
-
-  uptr MappedAddr;
-  zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS | ZX_VM_SPECIFIC_OVERWRITE;
-  if (!NoAccess)
-    MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
-  zx_status_t Status =
-      _zx_vmar_map(_zx_vmar_root_self(), MapFlags, Addr - getRootVmarBase(),
-                   Vmo, Addr - MapAddr, Size, &MappedAddr);
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmar_map", Size);
-    return false;
-  }
-  DCHECK_EQ(Addr, MappedAddr);
-
-  if (PreCommit) {
-    Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
-                               Size, nullptr, 0);
-    CHECK_EQ(Status, ZX_OK);
-  }
-
-  return true;
-}
-
-void MemMapFuchsia::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
-  zx_status_t Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, From - MapAddr,
-                                        Size, nullptr, 0);
-  CHECK_EQ(Status, ZX_OK);
-}
-
-void MemMapFuchsia::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
-  const bool NoAccess = !!(Flags & MAP_NOACCESS);
-
-  zx_vm_option_t MapFlags = 0;
-  if (!NoAccess)
-    MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
-  zx_status_t Status =
-      _zx_vmar_protect(_zx_vmar_root_self(), MapFlags, Addr, Size);
-  CHECK_EQ(Status, ZX_OK);
-}
-
-bool ReservedMemoryFuchsia::createImpl(UNUSED uptr Addr, uptr Size,
-                                       UNUSED const char *Name, uptr Flags) {
-  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
-
-  // Reserve memory by mapping the placeholder VMO without any permission.
-  zx_status_t Status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_ALLOW_FAULTS, 0,
-                                    getPlaceholderVmo(), 0, Size, &Base);
-  if (UNLIKELY(Status != ZX_OK)) {
-    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
-      dieOnError(Status, "zx_vmar_map", Size);
-    return false;
-  }
-
-  Capacity = Size;
-  return true;
-}
-
-void ReservedMemoryFuchsia::releaseImpl() {
-  zx_status_t Status = _zx_vmar_unmap(_zx_vmar_root_self(), Base, Capacity);
-  CHECK_EQ(Status, ZX_OK);
-}
-
-ReservedMemoryFuchsia::MemMapT ReservedMemoryFuchsia::dispatchImpl(uptr Addr,
-                                                                   uptr Size) {
-  return ReservedMemoryFuchsia::MemMapT(Addr, Size);
-}
-
-} // namespace scudo
-
-#endif // SCUDO_FUCHSIA
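
`getPlaceholderVmo()` above uses a create-then-publish pattern: every racing initializer creates its own resource, exactly one wins the compare-exchange, and the losers destroy their copy and adopt the winner's. The same shape with a generic stand-in factory (the `createResource`/`destroyResource` names are placeholders for `_zx_vmo_create`/`_zx_handle_close`):

```cpp
#include <atomic>

using Handle = int;
constexpr Handle InvalidHandle = -1;

Handle createResource() { return 42; } // stand-in for the real factory
void destroyResource(Handle) {}        // stand-in for the real teardown

Handle getSingletonHandle() {
  static std::atomic<Handle> Stored{InvalidHandle};
  Handle H = Stored.load(std::memory_order_acquire);
  if (H == InvalidHandle) {
    H = createResource();
    Handle Expected = InvalidHandle;
    if (!Stored.compare_exchange_strong(Expected, H,
                                        std::memory_order_acq_rel)) {
      destroyResource(H); // another thread won the race
      H = Expected;       // adopt the published handle
    }
  }
  return H;
}
```

Unlike a mutex-guarded singleton, this may create the resource more than once under contention, which is acceptable here because a spare zero-sized VMO is cheap to create and close.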
diff --git a/Telegram/ThirdParty/scudo/mem_map_fuchsia.h b/Telegram/ThirdParty/scudo/mem_map_fuchsia.h
deleted file mode 100644
index 2e66f89cf..000000000
--- a/Telegram/ThirdParty/scudo/mem_map_fuchsia.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//===-- mem_map_fuchsia.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_MEM_MAP_FUCHSIA_H_
-#define SCUDO_MEM_MAP_FUCHSIA_H_
-
-#include "mem_map_base.h"
-
-#if SCUDO_FUCHSIA
-
-#include <stdint.h>
-#include <zircon/types.h>
-
-namespace scudo {
-
-class MemMapFuchsia final : public MemMapBase<MemMapFuchsia> {
-public:
-  constexpr MemMapFuchsia() = default;
-
-  // Impls for base functions.
-  bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void unmapImpl(uptr Addr, uptr Size);
-  bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
-  void releasePagesToOSImpl(uptr From, uptr Size) {
-    return releaseAndZeroPagesToOSImpl(From, Size);
-  }
-  void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
-  uptr getBaseImpl() { return WindowBase; }
-  uptr getCapacityImpl() { return WindowSize; }
-
-private:
-  friend class ReservedMemoryFuchsia;
-
-  // Used by ReservedMemoryFuchsia::dispatch.
-  MemMapFuchsia(uptr Base, uptr Capacity);
-
-  // Virtual memory address corresponding to VMO offset 0.
-  uptr MapAddr = 0;
-
-  // Virtual memory base address and size of the VMO subrange that is still in
-  // use. unmapImpl() can shrink this range, either at the beginning or at the
-  // end.
-  uptr WindowBase = 0;
-  uptr WindowSize = 0;
-
-  zx_handle_t Vmo = ZX_HANDLE_INVALID;
-};
-
-class ReservedMemoryFuchsia final
-    : public ReservedMemory<ReservedMemoryFuchsia, MemMapFuchsia> {
-public:
-  constexpr ReservedMemoryFuchsia() = default;
-
-  bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void releaseImpl();
-  MemMapT dispatchImpl(uptr Addr, uptr Size);
-  uptr getBaseImpl() { return Base; }
-  uptr getCapacityImpl() { return Capacity; }
-
-private:
-  uptr Base = 0;
-  uptr Capacity = 0;
-};
-
-} // namespace scudo
-
-#endif // SCUDO_FUCHSIA
-
-#endif // SCUDO_MEM_MAP_FUCHSIA_H_
diff --git a/Telegram/ThirdParty/scudo/mem_map_linux.cpp b/Telegram/ThirdParty/scudo/mem_map_linux.cpp
deleted file mode 100644
index 783c4f0d9..000000000
--- a/Telegram/ThirdParty/scudo/mem_map_linux.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-//===-- mem_map_linux.cpp ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_LINUX
-
-#include "mem_map_linux.h"
-
-#include "common.h"
-#include "internal_defs.h"
-#include "linux.h"
-#include "mutex.h"
-#include "report_linux.h"
-#include "string_utils.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/futex.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-
-#if SCUDO_ANDROID
-// TODO(chiahungduan): Review if we still need the following macros.
-#include <sys/prctl.h>
-// Definitions of prctl arguments to set a vma name in Android kernels.
-#define ANDROID_PR_SET_VMA 0x53564d41
-#define ANDROID_PR_SET_VMA_ANON_NAME 0
-#endif
-
-namespace scudo {
-
-static void *mmapWrapper(uptr Addr, uptr Size, const char *Name, uptr Flags) {
-  int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
-  int MmapProt;
-  if (Flags & MAP_NOACCESS) {
-    MmapFlags |= MAP_NORESERVE;
-    MmapProt = PROT_NONE;
-  } else {
-    MmapProt = PROT_READ | PROT_WRITE;
-  }
-#if defined(__aarch64__)
-#ifndef PROT_MTE
-#define PROT_MTE 0x20
-#endif
-  if (Flags & MAP_MEMTAG)
-    MmapProt |= PROT_MTE;
-#endif
-  if (Addr)
-    MmapFlags |= MAP_FIXED;
-  void *P =
-      mmap(reinterpret_cast<void *>(Addr), Size, MmapProt, MmapFlags, -1, 0);
-  if (P == MAP_FAILED) {
-    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
-      reportMapError(errno == ENOMEM ? Size : 0);
-    return nullptr;
-  }
-#if SCUDO_ANDROID
-  if (Name)
-    prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
-#else
-  (void)Name;
-#endif
-
-  return P;
-}
-
-bool MemMapLinux::mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags) {
-  void *P = mmapWrapper(Addr, Size, Name, Flags);
-  if (P == nullptr)
-    return false;
-
-  MapBase = reinterpret_cast<uptr>(P);
-  MapCapacity = Size;
-  return true;
-}
-
-void MemMapLinux::unmapImpl(uptr Addr, uptr Size) {
-  // If we unmap all the pages, also set `MapBase` to 0 to indicate an
-  // invalid status.
-  if (Size == MapCapacity) {
-    MapBase = MapCapacity = 0;
-  } else {
-    // This is a partial unmap; if the pages are unmapped from the beginning,
-    // shift `MapBase` to the new base.
-    if (MapBase == Addr)
-      MapBase = Addr + Size;
-    MapCapacity -= Size;
-  }
-
-  if (munmap(reinterpret_cast<void *>(Addr), Size) != 0)
-    reportUnmapError(Addr, Size);
-}
-
-bool MemMapLinux::remapImpl(uptr Addr, uptr Size, const char *Name,
-                            uptr Flags) {
-  void *P = mmapWrapper(Addr, Size, Name, Flags);
-  if (reinterpret_cast<uptr>(P) != Addr)
-    reportMapError();
-  return true;
-}
-
-void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
-  int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
-  if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
-    reportProtectError(Addr, Size, Prot);
-}
-
-void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
-  void *Addr = reinterpret_cast<void *>(From);
-
-  while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
-  }
-}
-
-bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
-                                     uptr Flags) {
-  ReservedMemoryLinux::MemMapT MemMap;
-  if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
-    return false;
-
-  MapBase = MemMap.getBase();
-  MapCapacity = MemMap.getCapacity();
-
-  return true;
-}
-
-void ReservedMemoryLinux::releaseImpl() {
-  if (munmap(reinterpret_cast<void *>(getBase()), getCapacity()) != 0)
-    reportUnmapError(getBase(), getCapacity());
-}
-
-ReservedMemoryLinux::MemMapT ReservedMemoryLinux::dispatchImpl(uptr Addr,
-                                                               uptr Size) {
-  return ReservedMemoryLinux::MemMapT(Addr, Size);
-}
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX
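
`ReservedMemoryLinux` above implements the common reserve-then-commit pattern: grab address space with `PROT_NONE` (plus `MAP_NORESERVE`, so no commit charge), then make subranges usable on demand. The deleted code commits by remapping with `MAP_FIXED` through `mmapWrapper()`; the sketch below uses `mprotect` as the simpler equivalent (plain POSIX, error handling reduced to booleans):

```cpp
#include <sys/mman.h>
#include <cstddef>

void *reserve(size_t Size) {
  // Address space only: inaccessible and not charged against commit.
  void *P = mmap(nullptr, Size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return P == MAP_FAILED ? nullptr : P;
}

bool commit(void *Addr, size_t Size) {
  // Make a previously reserved subrange usable.
  return mprotect(Addr, Size, PROT_READ | PROT_WRITE) == 0;
}

bool decommit(void *Addr, size_t Size) {
  // Drop the physical pages; the reservation itself stays intact.
  return madvise(Addr, Size, MADV_DONTNEED) == 0 &&
         mprotect(Addr, Size, PROT_NONE) == 0;
}
```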
diff --git a/Telegram/ThirdParty/scudo/mem_map_linux.h b/Telegram/ThirdParty/scudo/mem_map_linux.h
deleted file mode 100644
index 7a89b3bff..000000000
--- a/Telegram/ThirdParty/scudo/mem_map_linux.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//===-- mem_map_linux.h -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_MEM_MAP_LINUX_H_
-#define SCUDO_MEM_MAP_LINUX_H_
-
-#include "platform.h"
-
-#if SCUDO_LINUX
-
-#include "common.h"
-#include "mem_map_base.h"
-
-namespace scudo {
-
-class MemMapLinux final : public MemMapBase<MemMapLinux> {
-public:
-  constexpr MemMapLinux() = default;
-  MemMapLinux(uptr Base, uptr Capacity)
-      : MapBase(Base), MapCapacity(Capacity) {}
-
-  // Impls for base functions.
-  bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
-  void unmapImpl(uptr Addr, uptr Size);
-  bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
-  void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
-  void releasePagesToOSImpl(uptr From, uptr Size) {
-    return releaseAndZeroPagesToOSImpl(From, Size);
-  }
-  void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
-  uptr getBaseImpl() { return MapBase; }
-  uptr getCapacityImpl() { return MapCapacity; }
-
-private:
-  uptr MapBase = 0;
-  uptr MapCapacity = 0;
-};
-
-// This will be deprecated once every allocator is supported by each
-// platform's `MemMap` implementation.
-class ReservedMemoryLinux final
-    : public ReservedMemory<ReservedMemoryLinux, MemMapLinux> {
-public:
-  // The following two are the Impls for functions in `MemMapBase`.
-  uptr getBaseImpl() { return MapBase; }
-  uptr getCapacityImpl() { return MapCapacity; }
-
-  // These three are specific to `ReservedMemory`.
-  bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
-  void releaseImpl();
-  MemMapT dispatchImpl(uptr Addr, uptr Size);
-
-private:
-  uptr MapBase = 0;
-  uptr MapCapacity = 0;
-};
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX
-
-#endif // SCUDO_MEM_MAP_LINUX_H_
diff --git a/Telegram/ThirdParty/scudo/memtag.h b/Telegram/ThirdParty/scudo/memtag.h
deleted file mode 100644
index aaed2192a..000000000
--- a/Telegram/ThirdParty/scudo/memtag.h
+++ /dev/null
@@ -1,335 +0,0 @@
-//===-- memtag.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_MEMTAG_H_
-#define SCUDO_MEMTAG_H_
-
-#include "internal_defs.h"
-
-#if SCUDO_CAN_USE_MTE
-#include <sys/auxv.h>
-#include <sys/prctl.h>
-#endif
-
-namespace scudo {
-
-#if (__clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)) ||  \
-    defined(SCUDO_FUZZ)
-
-// We assume that Top-Byte Ignore is enabled if the architecture supports memory
-// tagging. Not all operating systems enable TBI, so we only claim architectural
-// support for memory tagging if the operating system enables TBI.
-// HWASan uses the top byte for its own purpose and Scudo should not touch it.
-#if SCUDO_CAN_USE_MTE && !defined(SCUDO_DISABLE_TBI) &&                        \
-    !__has_feature(hwaddress_sanitizer)
-inline constexpr bool archSupportsMemoryTagging() { return true; }
-#else
-inline constexpr bool archSupportsMemoryTagging() { return false; }
-#endif
-
-inline constexpr uptr archMemoryTagGranuleSize() { return 16; }
-
-inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
-
-inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
-
-#else
-
-inline constexpr bool archSupportsMemoryTagging() { return false; }
-
-inline NORETURN uptr archMemoryTagGranuleSize() {
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN uptr untagPointer(uptr Ptr) {
-  (void)Ptr;
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN uint8_t extractTag(uptr Ptr) {
-  (void)Ptr;
-  UNREACHABLE("memory tagging not supported");
-}
-
-#endif
-
-#if __clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)
-
-#if SCUDO_CAN_USE_MTE
-
-inline bool systemSupportsMemoryTagging() {
-#ifndef HWCAP2_MTE
-#define HWCAP2_MTE (1 << 18)
-#endif
-  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
-}
-
-inline bool systemDetectsMemoryTagFaultsTestOnly() {
-#ifndef PR_SET_TAGGED_ADDR_CTRL
-#define PR_SET_TAGGED_ADDR_CTRL 54
-#endif
-#ifndef PR_GET_TAGGED_ADDR_CTRL
-#define PR_GET_TAGGED_ADDR_CTRL 56
-#endif
-#ifndef PR_TAGGED_ADDR_ENABLE
-#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
-#endif
-#ifndef PR_MTE_TCF_SHIFT
-#define PR_MTE_TCF_SHIFT 1
-#endif
-#ifndef PR_MTE_TAG_SHIFT
-#define PR_MTE_TAG_SHIFT 3
-#endif
-#ifndef PR_MTE_TCF_NONE
-#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
-#endif
-#ifndef PR_MTE_TCF_SYNC
-#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
-#endif
-#ifndef PR_MTE_TCF_MASK
-#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
-#endif
-  int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
-  if (res == -1)
-    return false;
-  return (static_cast<unsigned long>(res) & PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
-}
-
-inline void enableSystemMemoryTaggingTestOnly() {
-  prctl(PR_SET_TAGGED_ADDR_CTRL,
-        PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
-        0, 0, 0);
-}
-
-#else // !SCUDO_CAN_USE_MTE
-
-inline bool systemSupportsMemoryTagging() { return false; }
-
-inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN void enableSystemMemoryTaggingTestOnly() {
-  UNREACHABLE("memory tagging not supported");
-}
-
-#endif // SCUDO_CAN_USE_MTE
-
-class ScopedDisableMemoryTagChecks {
-  uptr PrevTCO;
-
-public:
-  ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(
-        R"(
-        .arch_extension memtag
-        mrs %0, tco
-        msr tco, #1
-        )"
-        : "=r"(PrevTCO));
-  }
-
-  ~ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(
-        R"(
-        .arch_extension memtag
-        msr tco, %0
-        )"
-        :
-        : "r"(PrevTCO));
-  }
-};
-
-inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
-  ExcludeMask |= 1; // Always exclude Tag 0.
-  uptr TaggedPtr;
-  __asm__ __volatile__(
-      R"(
-      .arch_extension memtag
-      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
-      )"
-      : [TaggedPtr] "=r"(TaggedPtr)
-      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
-  return TaggedPtr;
-}
-
-inline uptr addFixedTag(uptr Ptr, uptr Tag) {
-  DCHECK_LT(Tag, 16);
-  DCHECK_EQ(untagPointer(Ptr), Ptr);
-  return Ptr | (Tag << 56);
-}
-
-inline uptr storeTags(uptr Begin, uptr End) {
-  DCHECK_EQ(0, Begin % 16);
-  uptr LineSize, Next, Tmp;
-  __asm__ __volatile__(
-      R"(
-    .arch_extension memtag
-
-    // Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
-    // of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
-    // indicates that the DC instructions are unavailable.
-    DCZID .req %[Tmp]
-    mrs DCZID, dczid_el0
-    tbnz DCZID, #4, 3f
-    and DCZID, DCZID, #15
-    mov %[LineSize], #4
-    lsl %[LineSize], %[LineSize], DCZID
-    .unreq DCZID
-
-    // Our main loop doesn't handle the case where we don't need to perform any
-    // DC GZVA operations. If the size of our tagged region is less than
-    // twice the cache line size, bail out to the slow path since it's not
-    // guaranteed that we'll be able to do a DC GZVA.
-    Size .req %[Tmp]
-    sub Size, %[End], %[Cur]
-    cmp Size, %[LineSize], lsl #1
-    b.lt 3f
-    .unreq Size
-
-    LineMask .req %[Tmp]
-    sub LineMask, %[LineSize], #1
-
-    // STZG until the start of the next cache line.
-    orr %[Next], %[Cur], LineMask
-  1:
-    stzg %[Cur], [%[Cur]], #16
-    cmp %[Cur], %[Next]
-    b.lt 1b
-
-    // DC GZVA cache lines until we have no more full cache lines.
-    bic %[Next], %[End], LineMask
-    .unreq LineMask
-  2:
-    dc gzva, %[Cur]
-    add %[Cur], %[Cur], %[LineSize]
-    cmp %[Cur], %[Next]
-    b.lt 2b
-
-    // STZG until the end of the tagged region. This loop is also used to handle
-    // slow path cases.
-  3:
-    cmp %[Cur], %[End]
-    b.ge 4f
-    stzg %[Cur], [%[Cur]], #16
-    b 3b
-
-  4:
-  )"
-      : [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next),
-        [Tmp] "=&r"(Tmp)
-      : [End] "r"(End)
-      : "memory");
-  DCHECK_EQ(0, Begin % 16);
-  return Begin;
-}
-
-inline void storeTag(uptr Ptr) {
-  DCHECK_EQ(0, Ptr % 16);
-  __asm__ __volatile__(R"(
-    .arch_extension memtag
-    stg %0, [%0]
-  )"
-                       :
-                       : "r"(Ptr)
-                       : "memory");
-}
-
-inline uptr loadTag(uptr Ptr) {
-  DCHECK_EQ(0, Ptr % 16);
-  uptr TaggedPtr = Ptr;
-  __asm__ __volatile__(
-      R"(
-      .arch_extension memtag
-      ldg %0, [%0]
-      )"
-      : "+r"(TaggedPtr)
-      :
-      : "memory");
-  return TaggedPtr;
-}
-
-#else
-
-inline NORETURN bool systemSupportsMemoryTagging() {
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN void enableSystemMemoryTaggingTestOnly() {
-  UNREACHABLE("memory tagging not supported");
-}
-
-struct ScopedDisableMemoryTagChecks {
-  ScopedDisableMemoryTagChecks() {}
-};
-
-inline NORETURN uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
-  (void)Ptr;
-  (void)ExcludeMask;
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN uptr addFixedTag(uptr Ptr, uptr Tag) {
-  (void)Ptr;
-  (void)Tag;
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN uptr storeTags(uptr Begin, uptr End) {
-  (void)Begin;
-  (void)End;
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN void storeTag(uptr Ptr) {
-  (void)Ptr;
-  UNREACHABLE("memory tagging not supported");
-}
-
-inline NORETURN uptr loadTag(uptr Ptr) {
-  (void)Ptr;
-  UNREACHABLE("memory tagging not supported");
-}
-
-#endif
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmissing-noreturn"
-inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
-                         uptr *TaggedBegin, uptr *TaggedEnd) {
-  *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
-  *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
-}
-#pragma GCC diagnostic pop
-
-inline void *untagPointer(void *Ptr) {
-  return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
-}
-
-inline void *loadTag(void *Ptr) {
-  return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
-}
-
-inline void *addFixedTag(void *Ptr, uptr Tag) {
-  return reinterpret_cast<void *>(
-      addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
-}
-
-template <typename Config>
-inline constexpr bool allocatorSupportsMemoryTagging() {
-  return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging &&
-         (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize();
-}
-
-} // namespace scudo
-
-#endif
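
Stripped of the inline assembly, the pointer-tag layout in the header above is simple bit arithmetic: AArch64 Top-Byte Ignore leaves bits 56..63 out of address translation, and MTE stores a 4-bit tag in bits 56..59. A plain-C++ restatement of `untagPointer`/`extractTag`/`addFixedTag`:

```cpp
#include <cassert>
#include <cstdint>

inline uint64_t untag(uint64_t Ptr) { return Ptr & ((1ULL << 56) - 1); }
inline uint8_t tagOf(uint64_t Ptr) { return (Ptr >> 56) & 0xf; }
inline uint64_t withTag(uint64_t Ptr, uint64_t Tag) {
  assert(Tag < 16 && untag(Ptr) == Ptr);
  return Ptr | (Tag << 56);
}

int main() {
  const uint64_t P = 0x00007000dead0000ULL;
  const uint64_t T = withTag(P, 0xa);       // 0x0a007000dead0000
  assert(tagOf(T) == 0xa && untag(T) == P); // round-trips cleanly
}
```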
diff --git a/Telegram/ThirdParty/scudo/mutex.h b/Telegram/ThirdParty/scudo/mutex.h
deleted file mode 100644
index 4caa94521..000000000
--- a/Telegram/ThirdParty/scudo/mutex.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- mutex.h -------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_MUTEX_H_
-#define SCUDO_MUTEX_H_
-
-#include "atomic_helpers.h"
-#include "common.h"
-#include "thread_annotations.h"
-
-#include <string.h>
-
-#if SCUDO_FUCHSIA
-#include <lib/sync/mutex.h> // for sync_mutex_t
-#endif
-
-namespace scudo {
-
-class CAPABILITY("mutex") HybridMutex {
-public:
-  bool tryLock() TRY_ACQUIRE(true);
-  NOINLINE void lock() ACQUIRE() {
-    if (LIKELY(tryLock()))
-      return;
-      // The compiler may try to fully unroll the loop, ending up in a
-      // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
-      // is large, ugly and unneeded; a compact loop is better for our purpose
-      // here. Use a pragma to tell the compiler not to unroll the loop.
-#ifdef __clang__
-#pragma nounroll
-#endif
-    for (u8 I = 0U; I < NumberOfTries; I++) {
-      delayLoop();
-      if (tryLock())
-        return;
-    }
-    lockSlow();
-  }
-  void unlock() RELEASE();
-
-  // TODO(chiahungduan): In general, we may want to assert the owner of the
-  // lock as well. Given the current uses of HybridMutex, it's acceptable
-  // without asserting the owner. Re-evaluate this when we have scenarios
-  // that require a more fine-grained lock granularity.
-  ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
-    if (SCUDO_DEBUG)
-      assertHeldImpl();
-  }
-
-private:
-  void delayLoop() {
-    // The value comes from the average time spent accessing caches (which
-    // are the fastest operations) so that we are unlikely to wait too long for
-    // fast operations.
-    constexpr u32 SpinTimes = 16;
-    volatile u32 V = 0;
-    for (u32 I = 0; I < SpinTimes; ++I) {
-      u32 Tmp = V + 1;
-      V = Tmp;
-    }
-  }
-
-  void assertHeldImpl();
-
-  // TODO(chiahungduan): Adapt this value based on scenarios. E.g., primary and
-  // secondary allocator have different allocation times.
-  static constexpr u8 NumberOfTries = 32U;
-
-#if SCUDO_LINUX
-  atomic_u32 M = {};
-#elif SCUDO_FUCHSIA
-  sync_mutex_t M = {};
-#endif
-
-  void lockSlow() ACQUIRE();
-};
-
-class SCOPED_CAPABILITY ScopedLock {
-public:
-  explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
-  ~ScopedLock() RELEASE() { Mutex.unlock(); }
-
-private:
-  HybridMutex &Mutex;
-
-  ScopedLock(const ScopedLock &) = delete;
-  void operator=(const ScopedLock &) = delete;
-};
-
-} // namespace scudo
-
-#endif // SCUDO_MUTEX_H_
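
ScopedLock is the usual RAII guard over HybridMutex's lock()/unlock(): it
acquires in the constructor and releases in the destructor, even on early
return. A self-contained sketch of the same pattern, using standard library
types purely for illustration:

    #include <mutex>

    namespace sketch {
    std::mutex M;
    long Counter = 0;

    void increment() {
      // Same shape as `ScopedLock L(Mutex);` in the allocator: acquired
      // here, released when L goes out of scope.
      std::lock_guard<std::mutex> L(M);
      ++Counter;
    }
    } // namespace sketch
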
diff --git a/Telegram/ThirdParty/scudo/options.h b/Telegram/ThirdParty/scudo/options.h
deleted file mode 100644
index b20142a41..000000000
--- a/Telegram/ThirdParty/scudo/options.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//===-- options.h -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_OPTIONS_H_
-#define SCUDO_OPTIONS_H_
-
-#include "atomic_helpers.h"
-#include "common.h"
-#include "memtag.h"
-
-namespace scudo {
-
-enum class OptionBit {
-  MayReturnNull,
-  FillContents0of2,
-  FillContents1of2,
-  DeallocTypeMismatch,
-  DeleteSizeMismatch,
-  TrackAllocationStacks,
-  UseOddEvenTags,
-  UseMemoryTagging,
-  AddLargeAllocationSlack,
-};
-
-struct Options {
-  u32 Val;
-
-  bool get(OptionBit Opt) const { return Val & (1U << static_cast<u32>(Opt)); }
-
-  FillContentsMode getFillContentsMode() const {
-    return static_cast<FillContentsMode>(
-        (Val >> static_cast<u32>(OptionBit::FillContents0of2)) & 3);
-  }
-};
-
-template <typename Config> bool useMemoryTagging(const Options &Options) {
-  return allocatorSupportsMemoryTagging<Config>() &&
-         Options.get(OptionBit::UseMemoryTagging);
-}
-
-struct AtomicOptions {
-  atomic_u32 Val = {};
-
-  Options load() const { return Options{atomic_load_relaxed(&Val)}; }
-
-  void clear(OptionBit Opt) {
-    atomic_fetch_and(&Val, ~(1U << static_cast<u32>(Opt)),
-                     memory_order_relaxed);
-  }
-
-  void set(OptionBit Opt) {
-    atomic_fetch_or(&Val, 1U << static_cast<u32>(Opt), memory_order_relaxed);
-  }
-
-  void setFillContentsMode(FillContentsMode FillContents) {
-    u32 Opts = atomic_load_relaxed(&Val), NewOpts;
-    do {
-      NewOpts = Opts;
-      NewOpts &= ~(3U << static_cast<u32>(OptionBit::FillContents0of2));
-      NewOpts |= static_cast<u32>(FillContents)
-                 << static_cast<u32>(OptionBit::FillContents0of2);
-    } while (!atomic_compare_exchange_strong(&Val, &Opts, NewOpts,
-                                             memory_order_relaxed));
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_OPTIONS_H_
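
getFillContentsMode() reads the two FillContents bits as a single two-bit
field based at bit 1 (the position of FillContents0of2 in the enum), and
setFillContentsMode() rewrites that field under a compare-exchange loop so
concurrent set()/clear() calls aren't lost. A stand-alone sketch of the same
field arithmetic, with illustrative names:

    #include <cstdint>

    constexpr uint32_t FillShift = 1; // bit position of FillContents0of2

    // Read the two-bit mode, as getFillContentsMode() does.
    inline uint32_t getFill(uint32_t Val) { return (Val >> FillShift) & 3u; }

    // Write the two-bit mode: clear both bits, then OR in the new value.
    inline uint32_t setFill(uint32_t Val, uint32_t Mode) {
      return (Val & ~(3u << FillShift)) | ((Mode & 3u) << FillShift);
    }
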
diff --git a/Telegram/ThirdParty/scudo/platform.h b/Telegram/ThirdParty/scudo/platform.h
deleted file mode 100644
index b71a86be7..000000000
--- a/Telegram/ThirdParty/scudo/platform.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//===-- platform.h ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_PLATFORM_H_
-#define SCUDO_PLATFORM_H_
-
-// Transitive includes of stdint.h specify some of the defines checked below.
-#include <stdint.h>
-
-#if defined(__linux__) && !defined(__TRUSTY__)
-#define SCUDO_LINUX 1
-#else
-#define SCUDO_LINUX 0
-#endif
-
-// See https://android.googlesource.com/platform/bionic/+/master/docs/defines.md
-#if defined(__BIONIC__)
-#define SCUDO_ANDROID 1
-#else
-#define SCUDO_ANDROID 0
-#endif
-
-#if defined(__Fuchsia__)
-#define SCUDO_FUCHSIA 1
-#else
-#define SCUDO_FUCHSIA 0
-#endif
-
-#if defined(__TRUSTY__)
-#define SCUDO_TRUSTY 1
-#else
-#define SCUDO_TRUSTY 0
-#endif
-
-#if defined(__riscv) && (__riscv_xlen == 64)
-#define SCUDO_RISCV64 1
-#else
-#define SCUDO_RISCV64 0
-#endif
-
-#if defined(__LP64__)
-#define SCUDO_WORDSIZE 64U
-#else
-#define SCUDO_WORDSIZE 32U
-#endif
-
-#if SCUDO_WORDSIZE == 64U
-#define FIRST_32_SECOND_64(a, b) (b)
-#else
-#define FIRST_32_SECOND_64(a, b) (a)
-#endif
-
-#ifndef SCUDO_CAN_USE_PRIMARY64
-#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
-#endif
-
-#ifndef SCUDO_CAN_USE_MTE
-#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
-#endif
-
-// Use smaller table sizes for fuzzing in order to reduce input size.
-// Trusty just has less available memory.
-#ifndef SCUDO_SMALL_STACK_DEPOT
-#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
-#define SCUDO_SMALL_STACK_DEPOT 1
-#else
-#define SCUDO_SMALL_STACK_DEPOT 0
-#endif
-#endif
-
-#ifndef SCUDO_ENABLE_HOOKS
-#define SCUDO_ENABLE_HOOKS 0
-#endif
-
-#ifndef SCUDO_MIN_ALIGNMENT_LOG
-// We force malloc-type functions to be aligned to std::max_align_t, but there
-// is no reason why the minimum alignment for all other functions can't be 8
-// bytes, except obviously for applications making incorrect assumptions.
-// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
-#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
-#endif
-
-#if defined(__aarch64__)
-#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
-#else
-#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
-#endif
-
-// Older gcc versions have issues aligning to a constexpr and require an integer.
-// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
-#if defined(__powerpc__) || defined(__powerpc64__)
-#define SCUDO_CACHE_LINE_SIZE 128
-#else
-#define SCUDO_CACHE_LINE_SIZE 64
-#endif
-
-#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
-
-#endif // SCUDO_PLATFORM_H_
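
As a quick illustration of the selection macros above: on an LP64 target,
SCUDO_WORDSIZE is 64, FIRST_32_SECOND_64(a, b) yields b, and the default
minimum alignment is 1 << 4 == 16 bytes. A compile-time sanity check of that
expansion (illustrative only; it assumes the macros above are in scope):

    #if defined(__LP64__)
    static_assert(FIRST_32_SECOND_64(3, 4) == 4, "64-bit picks the 2nd value");
    static_assert((1 << SCUDO_MIN_ALIGNMENT_LOG) == 16, "16-byte min alignment");
    #endif
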
diff --git a/Telegram/ThirdParty/scudo/primary32.h b/Telegram/ThirdParty/scudo/primary32.h
deleted file mode 100644
index 4d03b282d..000000000
--- a/Telegram/ThirdParty/scudo/primary32.h
+++ /dev/null
@@ -1,1170 +0,0 @@
-//===-- primary32.h ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_PRIMARY32_H_
-#define SCUDO_PRIMARY32_H_
-
-#include "allocator_common.h"
-#include "bytemap.h"
-#include "common.h"
-#include "list.h"
-#include "local_cache.h"
-#include "options.h"
-#include "release.h"
-#include "report.h"
-#include "stats.h"
-#include "string_utils.h"
-#include "thread_annotations.h"
-
-namespace scudo {
-
-// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
-//
-// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog-byte
-// boundary, and keeps a bytemap of the mappable address space to track the
-// size class each region is associated with.
-//
-// Mapped regions are split into equally sized Blocks according to the size
-// class they belong to, and the associated pointers are shuffled to prevent any
-// predictable address pattern (the predictability increases with the block
-// size).
-//
-// Regions for size class 0 are special and used to hold TransferBatches, which
-// allow transferring arrays of pointers from the global size-class freelist to
-// the thread-specific freelist for said class, and back.
-//
-// Memory used by this allocator is never unmapped but can be partially
-// reclaimed if the platform allows for it.
-
-template <typename Config> class SizeClassAllocator32 {
-public:
-  typedef typename Config::Primary::CompactPtrT CompactPtrT;
-  typedef typename Config::Primary::SizeClassMap SizeClassMap;
-  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
-  // The bytemap can only track UINT8_MAX - 1 classes.
-  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
-  // Regions should be large enough to hold the largest Block.
-  static_assert((1UL << Config::Primary::RegionSizeLog) >=
-                    SizeClassMap::MaxSize,
-                "");
-  typedef SizeClassAllocator32<Config> ThisT;
-  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
-  typedef TransferBatch<ThisT> TransferBatchT;
-  typedef BatchGroup<ThisT> BatchGroupT;
-
-  static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
-                "BatchGroupT uses the same class size as TransferBatchT");
-
-  static uptr getSizeByClassId(uptr ClassId) {
-    return (ClassId == SizeClassMap::BatchClassId)
-               ? sizeof(TransferBatchT)
-               : SizeClassMap::getSizeByClassId(ClassId);
-  }
-
-  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
-
-  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    if (SCUDO_FUCHSIA)
-      reportError("SizeClassAllocator32 is not supported on Fuchsia");
-
-    if (SCUDO_TRUSTY)
-      reportError("SizeClassAllocator32 is not supported on Trusty");
-
-    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
-    PossibleRegions.init();
-    u32 Seed;
-    const u64 Time = getMonotonicTimeFast();
-    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
-      Seed = static_cast<u32>(
-          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
-    for (uptr I = 0; I < NumClasses; I++) {
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      Sci->RandState = getRandomU32(&Seed);
-      // Sci->MaxRegionIndex is already initialized to 0.
-      Sci->MinRegionIndex = NumRegions;
-      Sci->ReleaseInfo.LastReleaseAtNs = Time;
-    }
-    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
-  }
-
-  void unmapTestOnly() {
-    {
-      ScopedLock L(RegionsStashMutex);
-      while (NumberOfStashedRegions > 0) {
-        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
-              RegionSize);
-      }
-    }
-
-    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      ScopedLock L(Sci->Mutex);
-      if (Sci->MinRegionIndex < MinRegionIndex)
-        MinRegionIndex = Sci->MinRegionIndex;
-      if (Sci->MaxRegionIndex > MaxRegionIndex)
-        MaxRegionIndex = Sci->MaxRegionIndex;
-      *Sci = {};
-    }
-
-    ScopedLock L(ByteMapMutex);
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
-      if (PossibleRegions[I])
-        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
-    PossibleRegions.unmapTestOnly();
-  }
-
-  // When all blocks are freed, their total size has to equal `AllocatedUser`.
-  void verifyAllBlocksAreReleasedTestOnly() {
-    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
-    uptr BatchClassUsedInFreeLists = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      // We have to count BatchClassUsedInFreeLists in other regions first.
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      ScopedLock L1(Sci->Mutex);
-      uptr TotalBlocks = 0;
-      for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
-        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
-        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
-        for (const auto &It : BG.Batches)
-          TotalBlocks += It.getCount();
-      }
-
-      const uptr BlockSize = getSizeByClassId(I);
-      DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
-      DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
-    }
-
-    SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
-    ScopedLock L1(Sci->Mutex);
-    uptr TotalBlocks = 0;
-    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
-      if (LIKELY(!BG.Batches.empty())) {
-        for (const auto &It : BG.Batches)
-          TotalBlocks += It.getCount();
-      } else {
-        // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
-        // itself.
-        ++TotalBlocks;
-      }
-    }
-
-    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
-    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
-              Sci->AllocatedUser / BlockSize);
-    const uptr BlocksInUse =
-        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
-    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
-  }
-
-  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
-    return static_cast<CompactPtrT>(Ptr);
-  }
-
-  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
-    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
-  }
-
-  uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
-    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
-    return CompactPtr & ~Mask;
-  }
-
-  uptr decompactGroupBase(uptr CompactPtrGroupBase) {
-    return CompactPtrGroupBase;
-  }
-
-  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
-    const uptr PageSize = getPageSizeCached();
-    return BlockSize < PageSize / 16U;
-  }
-
-  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
-    const uptr PageSize = getPageSizeCached();
-    return BlockSize > PageSize;
-  }
-
-  // Note that `MaxBlockCount` will be used when we support arbitrary block
-  // counts. For now it's the same as the number of blocks stored in the
-  // `TransferBatch`.
-  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
-                UNUSED const u16 MaxBlockCount) {
-    TransferBatchT *B = popBatch(C, ClassId);
-    if (!B)
-      return 0;
-
-    const u16 Count = B->getCount();
-    DCHECK_GT(Count, 0U);
-    B->moveToArray(ToArray);
-
-    if (ClassId != SizeClassMap::BatchClassId)
-      C->deallocate(SizeClassMap::BatchClassId, B);
-
-    return Count;
-  }
-
-  TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
-    DCHECK_LT(ClassId, NumClasses);
-    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    ScopedLock L(Sci->Mutex);
-    TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
-    if (UNLIKELY(!B)) {
-      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
-        return nullptr;
-      B = popBatchImpl(C, ClassId, Sci);
-      // If `populateFreeList` succeeded, we are supposed to get free blocks.
-      DCHECK_NE(B, nullptr);
-    }
-    return B;
-  }
-
-  // Push the array of free blocks to the designated batch group.
-  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
-    DCHECK_LT(ClassId, NumClasses);
-    DCHECK_GT(Size, 0);
-
-    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    if (ClassId == SizeClassMap::BatchClassId) {
-      ScopedLock L(Sci->Mutex);
-      pushBatchClassBlocks(Sci, Array, Size);
-      return;
-    }
-
-    // TODO(chiahungduan): Consider not doing the grouping if the group size
-    // is not greater than the block size times a certain scale.
-
-    // Sort the blocks so that blocks belonging to the same group can be pushed
-    // together.
-    bool SameGroup = true;
-    for (u32 I = 1; I < Size; ++I) {
-      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
-        SameGroup = false;
-      CompactPtrT Cur = Array[I];
-      u32 J = I;
-      while (J > 0 &&
-             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
-        Array[J] = Array[J - 1];
-        --J;
-      }
-      Array[J] = Cur;
-    }
-
-    ScopedLock L(Sci->Mutex);
-    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
-  }
-
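
The sorting loop in pushBlocks() above is a plain insertion sort keyed on
compactPtrGroupBase(), a reasonable fit for the small arrays involved. The
same shape, stand-alone and with a pass-through key for illustration:

    #include <cstdint>

    // Insertion sort of Array[0..Size), ascending by Key(); Key() stands in
    // for compactPtrGroupBase() in the allocator.
    inline void insertionSortByKey(uint32_t *Array, uint32_t Size) {
      const auto Key = [](uint32_t V) { return V; };
      for (uint32_t I = 1; I < Size; ++I) {
        const uint32_t Cur = Array[I];
        uint32_t J = I;
        while (J > 0 && Key(Cur) < Key(Array[J - 1])) {
          Array[J] = Array[J - 1]; // shift larger keys one slot right
          --J;
        }
        Array[J] = Cur;
      }
    }
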
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    // The BatchClassId must be locked last since other classes can use it.
-    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
-      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
-        continue;
-      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
-    }
-    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
-    RegionsStashMutex.lock();
-    ByteMapMutex.lock();
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    ByteMapMutex.unlock();
-    RegionsStashMutex.unlock();
-    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
-    for (uptr I = 0; I < NumClasses; I++) {
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      getSizeClassInfo(I)->Mutex.unlock();
-    }
-  }
-
-  template <typename F> void iterateOverBlocks(F Callback) {
-    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      // TODO: Calling `iterateOverBlocks` requires the SizeClassAllocator32
-      // to be disabled. We may consider locking each region on demand
-      // instead.
-      Sci->Mutex.assertHeld();
-      if (Sci->MinRegionIndex < MinRegionIndex)
-        MinRegionIndex = Sci->MinRegionIndex;
-      if (Sci->MaxRegionIndex > MaxRegionIndex)
-        MaxRegionIndex = Sci->MaxRegionIndex;
-    }
-
-    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
-    ByteMapMutex.assertHeld();
-
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
-      if (PossibleRegions[I] &&
-          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
-        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
-        const uptr From = I * RegionSize;
-        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
-        for (uptr Block = From; Block < To; Block += BlockSize)
-          Callback(Block);
-      }
-    }
-  }
-
-  void getStats(ScopedString *Str) {
-    // TODO(kostyak): get the RSS per region.
-    uptr TotalMapped = 0;
-    uptr PoppedBlocks = 0;
-    uptr PushedBlocks = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      ScopedLock L(Sci->Mutex);
-      TotalMapped += Sci->AllocatedUser;
-      PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
-      PushedBlocks += Sci->FreeListInfo.PushedBlocks;
-    }
-    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
-                "remains %zu\n",
-                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
-    for (uptr I = 0; I < NumClasses; I++) {
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      ScopedLock L(Sci->Mutex);
-      getStats(Str, I, Sci);
-    }
-  }
-
-  void getFragmentationInfo(ScopedString *Str) {
-    Str->append(
-        "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
-        getPageSizeCached());
-
-    for (uptr I = 1; I < NumClasses; I++) {
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      ScopedLock L(Sci->Mutex);
-      getSizeClassFragmentationInfo(Sci, I, Str);
-    }
-  }
-
-  bool setOption(Option O, sptr Value) {
-    if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(Min(static_cast<s32>(Value),
-                                   Config::Primary::MaxReleaseToOsIntervalMs),
-                               Config::Primary::MinReleaseToOsIntervalMs);
-      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
-      return true;
-    }
-    // Not supported by the Primary, but not an error either.
-    return true;
-  }
-
-  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
-    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    // TODO: Once we have separate locks as in primary64, we may consider
-    // using tryLock() as well.
-    ScopedLock L(Sci->Mutex);
-    return releaseToOSMaybe(Sci, ClassId, ReleaseType);
-  }
-
-  uptr releaseToOS(ReleaseToOS ReleaseType) {
-    uptr TotalReleasedBytes = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      SizeClassInfo *Sci = getSizeClassInfo(I);
-      ScopedLock L(Sci->Mutex);
-      TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
-    }
-    return TotalReleasedBytes;
-  }
-
-  const char *getRegionInfoArrayAddress() const { return nullptr; }
-  static uptr getRegionInfoArraySize() { return 0; }
-
-  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
-                                    UNUSED uptr Ptr) {
-    return {};
-  }
-
-  AtomicOptions Options;
-
-private:
-  static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
-  static const uptr NumRegions =
-      SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog;
-  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
-  typedef FlatByteMap<NumRegions> ByteMap;
-
-  struct ReleaseToOsInfo {
-    uptr BytesInFreeListAtLastCheckpoint;
-    uptr RangesReleased;
-    uptr LastReleasedBytes;
-    u64 LastReleaseAtNs;
-  };
-
-  struct BlocksInfo {
-    SinglyLinkedList<BatchGroupT> BlockList = {};
-    uptr PoppedBlocks = 0;
-    uptr PushedBlocks = 0;
-  };
-
-  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
-    HybridMutex Mutex;
-    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
-    uptr CurrentRegion GUARDED_BY(Mutex);
-    uptr CurrentRegionAllocated GUARDED_BY(Mutex);
-    u32 RandState;
-    uptr AllocatedUser GUARDED_BY(Mutex);
-    // Lowest & highest region index allocated for this size class, to avoid
-    // looping through the whole NumRegions.
-    uptr MinRegionIndex GUARDED_BY(Mutex);
-    uptr MaxRegionIndex GUARDED_BY(Mutex);
-    ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
-  };
-  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
-
-  uptr computeRegionId(uptr Mem) {
-    const uptr Id = Mem >> Config::Primary::RegionSizeLog;
-    CHECK_LT(Id, NumRegions);
-    return Id;
-  }
-
-  uptr allocateRegionSlow() {
-    uptr MapSize = 2 * RegionSize;
-    const uptr MapBase = reinterpret_cast<uptr>(
-        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
-    if (!MapBase)
-      return 0;
-    const uptr MapEnd = MapBase + MapSize;
-    uptr Region = MapBase;
-    if (isAligned(Region, RegionSize)) {
-      ScopedLock L(RegionsStashMutex);
-      if (NumberOfStashedRegions < MaxStashedRegions)
-        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
-      else
-        MapSize = RegionSize;
-    } else {
-      Region = roundUp(MapBase, RegionSize);
-      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
-      MapSize = RegionSize;
-    }
-    const uptr End = Region + MapSize;
-    if (End != MapEnd)
-      unmap(reinterpret_cast<void *>(End), MapEnd - End);
-
-    DCHECK_EQ(Region % RegionSize, 0U);
-    static_assert(Config::Primary::RegionSizeLog == GroupSizeLog,
-                  "Memory group should be the same size as Region");
-
-    return Region;
-  }
-
-  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
-    DCHECK_LT(ClassId, NumClasses);
-    uptr Region = 0;
-    {
-      ScopedLock L(RegionsStashMutex);
-      if (NumberOfStashedRegions > 0)
-        Region = RegionsStash[--NumberOfStashedRegions];
-    }
-    if (!Region)
-      Region = allocateRegionSlow();
-    if (LIKELY(Region)) {
-      // Sci->Mutex is held by the caller, so updating the Min/Max is safe.
-      const uptr RegionIndex = computeRegionId(Region);
-      if (RegionIndex < Sci->MinRegionIndex)
-        Sci->MinRegionIndex = RegionIndex;
-      if (RegionIndex > Sci->MaxRegionIndex)
-        Sci->MaxRegionIndex = RegionIndex;
-      ScopedLock L(ByteMapMutex);
-      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
-    }
-    return Region;
-  }
-
-  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
-    DCHECK_LT(ClassId, NumClasses);
-    return &SizeClassInfoArray[ClassId];
-  }
-
-  void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
-      REQUIRES(Sci->Mutex) {
-    DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
-
-    // Free blocks are recorded by TransferBatches in the freelists of all
-    // size classes. TransferBatches themselves are allocated from
-    // BatchClassId. In order not to use an additional block to record the
-    // free blocks of BatchClassId, they are self-contained, i.e., a
-    // TransferBatch records the address of its own block. See the figure below:
-    //
-    // TransferBatch at 0xABCD
-    // +----------------------------+
-    // | Free blocks' addr          |
-    // | +------+------+------+     |
-    // | |0xABCD|...   |...   |     |
-    // | +------+------+------+     |
-    // +----------------------------+
-    //
-    // When we allocate all the free blocks in a TransferBatch, the block used
-    // by the TransferBatch itself becomes free for use as well. We don't need
-    // to recycle the TransferBatch. Correctness is maintained by the invariant,
-    //
-    //   The unit of each popBatch() request is an entire TransferBatch;
-    //   returning only part of the blocks in a TransferBatch is invalid.
-    //
-    // This ensures that a TransferBatch won't leak its own address while it's
-    // still holding other valid data.
-    //
-    // Besides, the BatchGroup is also allocated from BatchClassId and has its
-    // address recorded in a TransferBatch too. To maintain correctness,
-    //
-    //   The address of the BatchGroup is always recorded in the last
-    //   TransferBatch in the freelist (which also implies that the freelist
-    //   should only be updated with push_front). Once the last TransferBatch
-    //   is popped, the block used by the BatchGroup is also free for use.
-    //
-    // With this approach, the blocks used by BatchGroup and TransferBatch are
-    // reusable and need no additional space.
-
-    Sci->FreeListInfo.PushedBlocks += Size;
-    BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
-
-    if (BG == nullptr) {
-      // Construct `BatchGroup` on the last element.
-      BG = reinterpret_cast<BatchGroupT *>(
-          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
-      --Size;
-      BG->Batches.clear();
-      // BatchClass doesn't use memory groups. Use `0` to indicate there's no
-      // memory group here.
-      BG->CompactPtrGroupBase = 0;
-      // `BG` is also the block of BatchClassId. Note that this is different
-      // from `CreateGroup` in `pushBlocksImpl`
-      BG->PushedBlocks = 1;
-      BG->BytesInBGAtLastCheckpoint = 0;
-      BG->MaxCachedPerBatch =
-          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
-
-      Sci->FreeListInfo.BlockList.push_front(BG);
-    }
-
-    if (UNLIKELY(Size == 0))
-      return;
-
-    // This happens in two cases:
-    //   1. We just allocated a new `BatchGroup`.
-    //   2. Only one block is pushed when the freelist is empty.
-    if (BG->Batches.empty()) {
-      // Construct the `TransferBatch` on the last element.
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
-          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
-      TB->clear();
-      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
-      // recorded in the TransferBatch.
-      TB->add(Array[Size - 1]);
-      TB->add(
-          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
-      --Size;
-      DCHECK_EQ(BG->PushedBlocks, 1U);
-      // `TB` is also the block of BatchClassId.
-      BG->PushedBlocks += 1;
-      BG->Batches.push_front(TB);
-    }
-
-    TransferBatchT *CurBatch = BG->Batches.front();
-    DCHECK_NE(CurBatch, nullptr);
-
-    for (u32 I = 0; I < Size;) {
-      u16 UnusedSlots =
-          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
-      if (UnusedSlots == 0) {
-        CurBatch = reinterpret_cast<TransferBatchT *>(
-            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
-        CurBatch->clear();
-        // Self-contained
-        CurBatch->add(Array[I]);
-        ++I;
-        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
-        // BatchClassId.
-        BG->Batches.push_front(CurBatch);
-        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
-      }
-      // `UnusedSlots` is u16 so the result will also fit in u16.
-      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
-      CurBatch->appendFromArray(&Array[I], AppendSize);
-      I += AppendSize;
-    }
-
-    BG->PushedBlocks += Size;
-  }
-  // Push the blocks to their batch group. The layout will be like,
-  //
-  // FreeListInfo.BlockList -> BG -> BG -> BG
-  //                           |     |     |
-  //                           v     v     v
-  //                           TB    TB    TB
-  //                           |
-  //                           v
-  //                           TB
-  //
-  // Each BatchGroup (BG) is associated with a unique group id, and its free
-  // blocks are managed by a list of TransferBatches (TB). To reduce insertion
-  // time, the BGs are kept sorted and the input `Array` is supposed to be
-  // sorted as well, so that the sorted property is cheap to maintain.
-  // Use `SameGroup=true` to indicate that all blocks in the array are from
-  // the same group; we then skip checking the group id of each block.
-  //
-  // The region mutex needs to be held while calling this method.
-  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
-                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
-      REQUIRES(Sci->Mutex) {
-    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
-    DCHECK_GT(Size, 0U);
-
-    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
-      BatchGroupT *BG =
-          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
-      BG->Batches.clear();
-      TransferBatchT *TB =
-          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
-      TB->clear();
-
-      BG->CompactPtrGroupBase = CompactPtrGroupBase;
-      BG->Batches.push_front(TB);
-      BG->PushedBlocks = 0;
-      BG->BytesInBGAtLastCheckpoint = 0;
-      BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
-
-      return BG;
-    };
-
-    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
-      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
-      TransferBatchT *CurBatch = Batches.front();
-      DCHECK_NE(CurBatch, nullptr);
-
-      for (u32 I = 0; I < Size;) {
-        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
-        u16 UnusedSlots =
-            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
-        if (UnusedSlots == 0) {
-          CurBatch =
-              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
-          CurBatch->clear();
-          Batches.push_front(CurBatch);
-          UnusedSlots = BG->MaxCachedPerBatch;
-        }
-        // `UnusedSlots` is u16 so the result will also fit in u16.
-        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
-        CurBatch->appendFromArray(&Array[I], AppendSize);
-        I += AppendSize;
-      }
-
-      BG->PushedBlocks += Size;
-    };
-
-    Sci->FreeListInfo.PushedBlocks += Size;
-    BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
-
-    // In the following, `Cur` always points to the BatchGroup for blocks that
-    // will be pushed next. `Prev` is the element right before `Cur`.
-    BatchGroupT *Prev = nullptr;
-
-    while (Cur != nullptr &&
-           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
-      Prev = Cur;
-      Cur = Cur->Next;
-    }
-
-    if (Cur == nullptr ||
-        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
-      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
-      if (Prev == nullptr)
-        Sci->FreeListInfo.BlockList.push_front(Cur);
-      else
-        Sci->FreeListInfo.BlockList.insert(Prev, Cur);
-    }
-
-    // All the blocks are from the same group, just push without checking group
-    // id.
-    if (SameGroup) {
-      for (u32 I = 0; I < Size; ++I)
-        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
-
-      InsertBlocks(Cur, Array, Size);
-      return;
-    }
-
-    // The blocks are sorted by group id. Determine the segment of group and
-    // push them to their group together.
-    u32 Count = 1;
-    for (u32 I = 1; I < Size; ++I) {
-      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
-        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
-        InsertBlocks(Cur, Array + I - Count, Count);
-
-        while (Cur != nullptr &&
-               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
-          Prev = Cur;
-          Cur = Cur->Next;
-        }
-
-        if (Cur == nullptr ||
-            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
-          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
-          DCHECK_NE(Prev, nullptr);
-          Sci->FreeListInfo.BlockList.insert(Prev, Cur);
-        }
-
-        Count = 1;
-      } else {
-        ++Count;
-      }
-    }
-
-    InsertBlocks(Cur, Array + Size - Count, Count);
-  }
-
-  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
-  // group id will be considered first.
-  //
-  // The region mutex needs to be held while calling this method.
-  TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
-      REQUIRES(Sci->Mutex) {
-    if (Sci->FreeListInfo.BlockList.empty())
-      return nullptr;
-
-    SinglyLinkedList<TransferBatchT> &Batches =
-        Sci->FreeListInfo.BlockList.front()->Batches;
-
-    if (Batches.empty()) {
-      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
-      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
-      Sci->FreeListInfo.BlockList.pop_front();
-
-      // Block used by `BatchGroup` is from BatchClassId. Turn the block into
-      // `TransferBatch` with single block.
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
-      TB->clear();
-      TB->add(
-          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
-      Sci->FreeListInfo.PoppedBlocks += 1;
-      return TB;
-    }
-
-    TransferBatchT *B = Batches.front();
-    Batches.pop_front();
-    DCHECK_NE(B, nullptr);
-    DCHECK_GT(B->getCount(), 0U);
-
-    if (Batches.empty()) {
-      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
-      Sci->FreeListInfo.BlockList.pop_front();
-
-      // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
-      // while allocating. Note that the block used to construct the
-      // BatchGroup is recorded as a free block in the last element of
-      // BatchGroup::Batches, which means that once we pop the last
-      // TransferBatch, the block is implicitly deallocated.
-      if (ClassId != SizeClassMap::BatchClassId)
-        C->deallocate(SizeClassMap::BatchClassId, BG);
-    }
-
-    Sci->FreeListInfo.PoppedBlocks += B->getCount();
-    return B;
-  }
-
-  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
-      REQUIRES(Sci->Mutex) {
-    uptr Region;
-    uptr Offset;
-    // If the size-class currently has a region associated with it, use it. The
-    // newly created blocks will be located after the currently allocated memory
-    // for that region (up to RegionSize). Otherwise, create a new region, where
-    // the new blocks will be carved from the beginning.
-    if (Sci->CurrentRegion) {
-      Region = Sci->CurrentRegion;
-      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
-      Offset = Sci->CurrentRegionAllocated;
-    } else {
-      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
-      Region = allocateRegion(Sci, ClassId);
-      if (UNLIKELY(!Region))
-        return false;
-      C->getStats().add(StatMapped, RegionSize);
-      Sci->CurrentRegion = Region;
-      Offset = 0;
-    }
-
-    const uptr Size = getSizeByClassId(ClassId);
-    const u16 MaxCount = CacheT::getMaxCached(Size);
-    DCHECK_GT(MaxCount, 0U);
-    // The maximum number of blocks we should carve in the region is dictated
-    // by the maximum number of batches we want to fill, and the amount of
-    // memory left in the current region (we use the lower of the two). This
-    // will not be 0 as we ensure that a region can at least hold one block (via
-    // static_assert and at the end of this function).
-    const u32 NumberOfBlocks =
-        Min(MaxNumBatches * MaxCount,
-            static_cast<u32>((RegionSize - Offset) / Size));
-    DCHECK_GT(NumberOfBlocks, 0U);
-
-    constexpr u32 ShuffleArraySize =
-        MaxNumBatches * TransferBatchT::MaxNumCached;
-    // Fill the transfer batches and put them in the size-class freelist. We
-    // need to randomize the blocks for security purposes, so we first fill a
-    // local array that we then shuffle before populating the batches.
-    CompactPtrT ShuffleArray[ShuffleArraySize];
-    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
-
-    uptr P = Region + Offset;
-    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
-      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
-
-    if (ClassId != SizeClassMap::BatchClassId) {
-      u32 N = 1;
-      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
-      for (u32 I = 1; I < NumberOfBlocks; I++) {
-        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
-          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
-          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
-                         /*SameGroup=*/true);
-          N = 1;
-          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
-        } else {
-          ++N;
-        }
-      }
-
-      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
-      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
-                     /*SameGroup=*/true);
-    } else {
-      pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
-    }
-
-    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
-    // requests from `pushBlocks` and `popBatch`, which are the external
-    // interfaces. `populateFreeList` is an internal interface, so we subtract
-    // the blocks back out to avoid skewing the stats.
-    Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
-
-    const uptr AllocatedUser = Size * NumberOfBlocks;
-    C->getStats().add(StatFree, AllocatedUser);
-    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
-    // If there is not enough room in the region currently associated to fit
-    // more blocks, we deassociate the region by resetting CurrentRegion and
-    // CurrentRegionAllocated. Otherwise, update the allocated amount.
-    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
-      Sci->CurrentRegion = 0;
-      Sci->CurrentRegionAllocated = 0;
-    } else {
-      Sci->CurrentRegionAllocated += AllocatedUser;
-    }
-    Sci->AllocatedUser += AllocatedUser;
-
-    return true;
-  }
-
-  void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
-      REQUIRES(Sci->Mutex) {
-    if (Sci->AllocatedUser == 0)
-      return;
-    const uptr BlockSize = getSizeByClassId(ClassId);
-    const uptr InUse =
-        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
-    const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
-    uptr PushedBytesDelta = 0;
-    if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
-      PushedBytesDelta =
-          BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
-    }
-    const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
-    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
-                "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
-                "latest pushed bytes: %6zuK\n",
-                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
-                Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
-                InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
-                Sci->ReleaseInfo.LastReleasedBytes >> 10,
-                PushedBytesDelta >> 10);
-  }
-
-  void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
-                                     ScopedString *Str) REQUIRES(Sci->Mutex) {
-    const uptr BlockSize = getSizeByClassId(ClassId);
-    const uptr First = Sci->MinRegionIndex;
-    const uptr Last = Sci->MaxRegionIndex;
-    const uptr Base = First * RegionSize;
-    const uptr NumberOfRegions = Last - First + 1U;
-    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
-      ScopedLock L(ByteMapMutex);
-      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
-    };
-
-    FragmentationRecorder Recorder;
-    if (!Sci->FreeListInfo.BlockList.empty()) {
-      PageReleaseContext Context =
-          markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
-                         ReleaseToOS::ForceAll);
-      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-    }
-
-    const uptr PageSize = getPageSizeCached();
-    const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
-    const uptr InUseBlocks =
-        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
-    uptr AllocatedPagesCount = 0;
-    if (TotalBlocks != 0U) {
-      for (uptr I = 0; I < NumberOfRegions; ++I) {
-        if (SkipRegion(I))
-          continue;
-        AllocatedPagesCount += RegionSize / PageSize;
-      }
-
-      DCHECK_NE(AllocatedPagesCount, 0U);
-    }
-
-    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
-    const uptr InUsePages =
-        AllocatedPagesCount - Recorder.getReleasedPagesCount();
-    const uptr InUseBytes = InUsePages * PageSize;
-
-    uptr Integral;
-    uptr Fractional;
-    computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
-                      &Fractional);
-    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
-                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
-                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
-                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
-  }
-
-  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
-                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
-      REQUIRES(Sci->Mutex) {
-    const uptr BlockSize = getSizeByClassId(ClassId);
-
-    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
-    const uptr BytesInFreeList =
-        Sci->AllocatedUser -
-        (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
-            BlockSize;
-
-    if (UNLIKELY(BytesInFreeList == 0))
-      return 0;
-
-    // ====================================================================== //
-    // 1. Check if we have enough free blocks and if it's worth doing a page
-    // release.
-    // ====================================================================== //
-    if (ReleaseType != ReleaseToOS::ForceAll &&
-        !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
-                                 ReleaseType)) {
-      return 0;
-    }
-
-    const uptr First = Sci->MinRegionIndex;
-    const uptr Last = Sci->MaxRegionIndex;
-    DCHECK_NE(Last, 0U);
-    DCHECK_LE(First, Last);
-    uptr TotalReleasedBytes = 0;
-    const uptr Base = First * RegionSize;
-    const uptr NumberOfRegions = Last - First + 1U;
-
-    // ==================================================================== //
-    // 2. Mark the free blocks and we can tell which pages are in-use by
-    //    querying `PageReleaseContext`.
-    // ==================================================================== //
-    PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
-                                                NumberOfRegions, ReleaseType);
-    if (!Context.hasBlockMarked())
-      return 0;
-
-    // ==================================================================== //
-    // 3. Release the unused physical pages back to the OS.
-    // ==================================================================== //
-    ReleaseRecorder Recorder(Base);
-    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
-      ScopedLock L(ByteMapMutex);
-      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
-    };
-    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-
-    if (Recorder.getReleasedRangesCount() > 0) {
-      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
-      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
-      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
-      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
-    }
-    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
-
-    return TotalReleasedBytes;
-  }
-
-  bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
-                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
-      REQUIRES(Sci->Mutex) {
-    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
-    const uptr PageSize = getPageSizeCached();
-
-    if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
-      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
-
-    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
-    // so that we won't underestimate the releasable pages. For example, the
-    // following is the region usage,
-    //
-    //  BytesInFreeListAtLastCheckpoint   AllocatedUser
-    //                v                         v
-    //  |--------------------------------------->
-    //         ^                   ^
-    //  BytesInFreeList     ReleaseThreshold
-    //
-    // In general, if we have collected enough bytes and the amount of free
-    // bytes meets the ReleaseThreshold, we will try to do a page release. If
-    // we don't update `BytesInFreeListAtLastCheckpoint` when the current
-    // `BytesInFreeList` is smaller, we may wait longer for enough freed
-    // blocks, because the (BytesInFreeListAtLastCheckpoint - BytesInFreeList)
-    // bytes would not be counted.
-    const uptr PushedBytesDelta =
-        BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
-    if (PushedBytesDelta < PageSize)
-      return false;
-
-    // Releasing smaller blocks is expensive, so we want to make sure that a
-    // significant number of bytes is free, and that a good number of batches
-    // has been pushed to the freelist, before attempting to release.
-    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
-      if (PushedBytesDelta < Sci->AllocatedUser / 16U)
-        return false;
-
-    if (ReleaseType == ReleaseToOS::Normal) {
-      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-      if (IntervalMs < 0)
-        return false;
-
-      // The constant 8 here was selected by profiling some apps: the number
-      // of unreleased pages in the large size classes is around 16 pages or
-      // more. We choose half of that as a heuristic, which also avoids doing
-      // a page release on every pushBlocks() attempt by large blocks.
-      const bool ByPassReleaseInterval =
-          isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
-      if (!ByPassReleaseInterval) {
-        if (Sci->ReleaseInfo.LastReleaseAtNs +
-                static_cast<u64>(IntervalMs) * 1000000 >
-            getMonotonicTimeFast()) {
-          // Memory was returned recently.
-          return false;
-        }
-      }
-    } // if (ReleaseType == ReleaseToOS::Normal)
-
-    return true;
-  }
-
-  PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
-                                    const uptr BlockSize, const uptr Base,
-                                    const uptr NumberOfRegions,
-                                    ReleaseToOS ReleaseType)
-      REQUIRES(Sci->Mutex) {
-    const uptr PageSize = getPageSizeCached();
-    const uptr GroupSize = (1UL << GroupSizeLog);
-    const uptr CurGroupBase =
-        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
-
-    PageReleaseContext Context(BlockSize, NumberOfRegions,
-                               /*ReleaseSize=*/RegionSize);
-
-    auto DecompactPtr = [](CompactPtrT CompactPtr) {
-      return reinterpret_cast<uptr>(CompactPtr);
-    };
-    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
-      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
-      // The `GroupSize` may not be evenly divisible by `BlockSize`, which
-      // means there may be unused space at the end of the Region. Exclude
-      // that space to avoid an unused page map entry.
-      uptr AllocatedGroupSize = GroupBase == CurGroupBase
-                                    ? Sci->CurrentRegionAllocated
-                                    : roundDownSlow(GroupSize, BlockSize);
-      if (AllocatedGroupSize == 0)
-        continue;
-
-      // TransferBatches are pushed in front of BG.Batches. The first one may
-      // not have all caches used.
-      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.front()->getCount();
-      const uptr BytesInBG = NumBlocks * BlockSize;
-
-      if (ReleaseType != ReleaseToOS::ForceAll) {
-        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
-          BG.BytesInBGAtLastCheckpoint = BytesInBG;
-          continue;
-        }
-
-        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
-        if (PushedBytesDelta < PageSize)
-          continue;
-
-        // Given the randomness property, we try to release the pages only if
-        // the bytes used by free blocks exceed a certain proportion of the
-        // allocated space, e.g. 100 - 1 - 32 / 16 = 97% for 32-byte blocks.
-        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
-                                           (100U - 1U - BlockSize / 16U)) {
-          continue;
-        }
-      }
-
-      // TODO: Consider updating this after page release if `ReleaseRecorder`
-      // can tell the released bytes in each group.
-      BG.BytesInBGAtLastCheckpoint = BytesInBG;
-
-      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
-      const uptr RegionIndex = (GroupBase - Base) / RegionSize;
-
-      if (NumBlocks == MaxContainedBlocks) {
-        for (const auto &It : BG.Batches)
-          for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
-
-        const uptr To = GroupBase + AllocatedGroupSize;
-        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
-                                      AllocatedGroupSize);
-      } else {
-        DCHECK_LT(NumBlocks, MaxContainedBlocks);
-
-        // Note that we don't always visit the blocks in every BatchGroup, so
-        // we may miss the chance of releasing certain pages that cross
-        // BatchGroups.
-        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
-                                       RegionIndex, AllocatedGroupSize,
-                                       /*MayContainLastBlockInRegion=*/true);
-      }
-
-      // In the rare case that the PageMap allocation fails, we may not be
-      // able to do the page release.
-      if (UNLIKELY(!Context.hasBlockMarked()))
-        break;
-    }
-
-    return Context;
-  }
-
-  SizeClassInfo SizeClassInfoArray[NumClasses] = {};
-
-  HybridMutex ByteMapMutex;
-  // Track the regions in use: 0 means unused, otherwise ClassId + 1 is stored.
-  ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
-  atomic_s32 ReleaseToOsIntervalMs = {};
-  // Unless several threads request regions simultaneously from different size
-  // classes, the stash rarely contains more than 1 entry.
-  static constexpr uptr MaxStashedRegions = 4;
-  HybridMutex RegionsStashMutex;
-  uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
-  uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
-};
-
-} // namespace scudo
-
-#endif // SCUDO_PRIMARY32_H_
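
To make the small-block release condition in markFreeBlocks() concrete: a
group is only considered for release once its free-block bytes exceed
(100 - 1 - BlockSize / 16) percent of the allocated group size. A stand-alone
check of that arithmetic for a hypothetical 32-byte size class:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t BlockSize = 32;
      // Threshold from markFreeBlocks(): 100 - 1 - 32 / 16 == 97 (percent).
      const uint64_t ThresholdPct = 100 - 1 - BlockSize / 16;
      assert(ThresholdPct == 97);

      // With 4096 allocated bytes, 3968 free bytes is 96% (integer math)
      // and the group is skipped; 4000 free bytes reaches 97% and qualifies.
      const uint64_t AllocatedGroupSize = 4096;
      assert((uint64_t{3968} * 100) / AllocatedGroupSize < ThresholdPct);
      assert((uint64_t{4000} * 100) / AllocatedGroupSize >= ThresholdPct);
      return 0;
    }
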
diff --git a/Telegram/ThirdParty/scudo/primary64.h b/Telegram/ThirdParty/scudo/primary64.h
deleted file mode 100644
index 9a642d236..000000000
--- a/Telegram/ThirdParty/scudo/primary64.h
+++ /dev/null
@@ -1,1688 +0,0 @@
-//===-- primary64.h ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_PRIMARY64_H_
-#define SCUDO_PRIMARY64_H_
-
-#include "allocator_common.h"
-#include "bytemap.h"
-#include "common.h"
-#include "list.h"
-#include "local_cache.h"
-#include "mem_map.h"
-#include "memtag.h"
-#include "options.h"
-#include "release.h"
-#include "stats.h"
-#include "string_utils.h"
-#include "thread_annotations.h"
-
-#include "condition_variable.h"
-
-namespace scudo {
-
-// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
-//
-// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
-// into Regions, one per size class. Note that the base of that mapping is
-// random (subject to the platform-specific map() capabilities). If
-// PrimaryEnableRandomOffset is set, each Region actually starts at a random
-// offset from its base.
-//
-// Regions are mapped incrementally on demand to fulfill allocation requests,
-// those mappings being split into equally sized Blocks based on the size class
-// they belong to. The Blocks created are shuffled to prevent predictable
-// address patterns (the predictability increases with the size of the Blocks).
-//
-// The 1st Region (for size class 0) holds the TransferBatches. This is a
-// structure used to transfer arrays of available pointers from the size-class
-// freelist to the thread-specific freelist, and back.
-//
-// The memory used by this allocator is never unmapped, but can be partially
-// released if the platform allows for it.
-
-template <typename Config> class SizeClassAllocator64 {
-public:
-  typedef typename Config::Primary::CompactPtrT CompactPtrT;
-  typedef typename Config::Primary::SizeClassMap SizeClassMap;
-  typedef typename ConditionVariableState<
-      typename Config::Primary>::ConditionVariableT ConditionVariableT;
-  static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
-  static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
-  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
-  static_assert(RegionSizeLog >= GroupSizeLog,
-                "Group size shouldn't be greater than the region size");
-  static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
-  typedef SizeClassAllocator64<Config> ThisT;
-  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
-  typedef TransferBatch<ThisT> TransferBatchT;
-  typedef BatchGroup<ThisT> BatchGroupT;
-
-  static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
-                "BatchGroupT uses the same class size as TransferBatchT");
-
-  static uptr getSizeByClassId(uptr ClassId) {
-    return (ClassId == SizeClassMap::BatchClassId)
-               ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
-               : SizeClassMap::getSizeByClassId(ClassId);
-  }
-
-  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
-
-  static bool conditionVariableEnabled() {
-    return ConditionVariableState<typename Config::Primary>::enabled();
-  }
-
-  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
-
-    const uptr PageSize = getPageSizeCached();
-    const uptr GroupSize = (1UL << GroupSizeLog);
-    const uptr PagesInGroup = GroupSize / PageSize;
-    const uptr MinSizeClass = getSizeByClassId(1);
-    // When trying to release pages back to memory, visiting smaller size
-    // classes is expensive. Therefore, we only try to release smaller size
-    // classes when the amount of free blocks goes over a certain threshold
-    // (see the comment in releaseToOSMaybe() for more details). For example,
-    // for size class 32, we only do the release when the size of free blocks
-    // is greater than 97% of the pages in a group. However, this may introduce
-    // another issue: the amount of free blocks may bounce between 97% and
-    // 100%, meaning we may attempt many page releases but release very few
-    // pages each time (less than 3% of a group). `ReleaseToOsIntervalMs`
-    // slightly reduces the frequency of these calls, but it is better to have
-    // another guard to mitigate this issue.
-    //
-    // Here we add another constraint on the minimum size requirement. The
-    // constraint is determined by the size of in-use blocks in the minimal size
-    // class. Take size class 32 as an example,
-    //
-    //   +-     one memory group      -+
-    //   +----------------------+------+
-    //   |  97% of free blocks  |      |
-    //   +----------------------+------+
-    //                           \    /
-    //                      3% in-use blocks
-    //
-    //   * The release size threshold is 97%.
-    //
-    // The 3% of a group is about 7 pages. Between two consecutive calls to
-    // releaseToOSMaybe(), we require the growth of `PushedBlocks` to cover
-    // more than 7 pages. This mitigates page-release thrashing caused by
-    // memory usage bouncing around the threshold. The smallest size class
-    // takes the longest time to do the page release, so we use its amount of
-    // in-use blocks as the heuristic.
-    SmallerBlockReleasePageDelta =
-        PagesInGroup * (1 + MinSizeClass / 16U) / 100;
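-    // Worked example (assumed values): with 4 KiB pages and GroupSizeLog ==
-    // 20, PagesInGroup == 256; with MinSizeClass == 32 the multiplier is
-    // 1 + 32 / 16 == 3, so the delta is 256 * 3 / 100 == 7 pages, matching
-    // the "about 7 pages" figure above.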
-
-    // Reserve the space required for the Primary.
-    CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
-                                "scudo:primary_reserve"));
-    PrimaryBase = ReservedMemory.getBase();
-    DCHECK_NE(PrimaryBase, 0U);
-
-    u32 Seed;
-    const u64 Time = getMonotonicTimeFast();
-    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
-      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
-
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
-
-      // The actual start of a region is offset by a random number of pages
-      // when PrimaryEnableRandomOffset is set.
-      Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
-                          (Config::Primary::EnableRandomOffset
-                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
-                               : 0);
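-      // For illustration (assuming RegionSizeLog == 28 and 4 KiB pages):
-      // Region 3 would begin at PrimaryBase + 3 * 256 MiB, plus a random
-      // offset of 1 to 16 pages (4 KiB to 64 KiB) when the random offset is
-      // enabled.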
-      Region->RandState = getRandomU32(&Seed);
-      // Releasing small blocks is expensive, so set a higher threshold to
-      // avoid frequent page releases.
-      if (isSmallBlock(getSizeByClassId(I)))
-        Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
-      else
-        Region->TryReleaseThreshold = PageSize;
-      Region->ReleaseInfo.LastReleaseAtNs = Time;
-
-      Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
-          PrimaryBase + (I << RegionSizeLog), RegionSize);
-      CHECK(Region->MemMapInfo.MemMap.isAllocated());
-    }
-    shuffle(RegionInfoArray, NumClasses, &Seed);
-
-    // The binding should be done after region shuffling so that it won't bind
-    // the FLLock from the wrong region.
-    for (uptr I = 0; I < NumClasses; I++)
-      getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
-
-    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
-  }
-
-  void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
-      *Region = {};
-    }
-    if (PrimaryBase)
-      ReservedMemory.release();
-    PrimaryBase = 0U;
-  }
-
-  // When all blocks are freed, their total size has to match `AllocatedUser`.
-  void verifyAllBlocksAreReleasedTestOnly() {
-    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
-    uptr BatchClassUsedInFreeLists = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      // We have to count BatchClassUsedInFreeLists in other regions first.
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      RegionInfo *Region = getRegionInfo(I);
-      ScopedLock ML(Region->MMLock);
-      ScopedLock FL(Region->FLLock);
-      const uptr BlockSize = getSizeByClassId(I);
-      uptr TotalBlocks = 0;
-      for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
-        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
-        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
-        for (const auto &It : BG.Batches)
-          TotalBlocks += It.getCount();
-      }
-
-      DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
-      DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
-                Region->FreeListInfo.PoppedBlocks);
-    }
-
-    RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
-    ScopedLock ML(Region->MMLock);
-    ScopedLock FL(Region->FLLock);
-    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
-    uptr TotalBlocks = 0;
-    for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
-      if (LIKELY(!BG.Batches.empty())) {
-        for (const auto &It : BG.Batches)
-          TotalBlocks += It.getCount();
-      } else {
-        // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
-        // recording itself, so count it as one block.
-        ++TotalBlocks;
-      }
-    }
-    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
-              Region->MemMapInfo.AllocatedUser / BlockSize);
-    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
-              Region->FreeListInfo.PushedBlocks);
-    const uptr BlocksInUse =
-        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
-    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
-  }
-
-  // Note that `MaxBlockCount` will be used once we support arbitrary block
-  // counts. For now, it's the same as the number of blocks stored in the
-  // `TransferBatch`.
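-  // A minimal usage sketch (hypothetical caller, for illustration only):
-  //
-  //   CompactPtrT Ptrs[TransferBatchT::MaxNumCached];
-  //   const u16 Popped = Allocator.popBlocks(C, ClassId, Ptrs,
-  //                                          TransferBatchT::MaxNumCached);
-  //   // Popped == 0 means this size class couldn't provide any blocks.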
-  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
-                UNUSED const u16 MaxBlockCount) {
-    TransferBatchT *B = popBatch(C, ClassId);
-    if (!B)
-      return 0;
-
-    const u16 Count = B->getCount();
-    DCHECK_GT(Count, 0U);
-    B->moveToArray(ToArray);
-
-    if (ClassId != SizeClassMap::BatchClassId)
-      C->deallocate(SizeClassMap::BatchClassId, B);
-
-    return Count;
-  }
-
-  TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
-    DCHECK_LT(ClassId, NumClasses);
-    RegionInfo *Region = getRegionInfo(ClassId);
-
-    {
-      ScopedLock L(Region->FLLock);
-      TransferBatchT *B = popBatchImpl(C, ClassId, Region);
-      if (LIKELY(B))
-        return B;
-    }
-
-    bool ReportRegionExhausted = false;
-    TransferBatchT *B = nullptr;
-
-    if (conditionVariableEnabled()) {
-      B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
-    } else {
-      while (true) {
-        // When two threads compete for `Region->MMLock`, we only want one of
-        // them to call populateFreeListAndPopBatch(). To avoid both of them
-        // doing that, always check the freelist before mapping new pages.
-        ScopedLock ML(Region->MMLock);
-        {
-          ScopedLock FL(Region->FLLock);
-          if ((B = popBatchImpl(C, ClassId, Region)))
-            break;
-        }
-
-        const bool RegionIsExhausted = Region->Exhausted;
-        if (!RegionIsExhausted)
-          B = populateFreeListAndPopBatch(C, ClassId, Region);
-        ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
-        break;
-      }
-    }
-
-    if (UNLIKELY(ReportRegionExhausted)) {
-      Printf("Can't populate more pages for size class %zu.\n",
-             getSizeByClassId(ClassId));
-
-      // Theoretically, BatchClass shouldn't be used up. Abort immediately when
-      // it happens.
-      if (ClassId == SizeClassMap::BatchClassId)
-        reportOutOfBatchClass();
-    }
-
-    return B;
-  }
-
-  // Push the array of free blocks to the designated batch group.
-  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
-    DCHECK_LT(ClassId, NumClasses);
-    DCHECK_GT(Size, 0);
-
-    RegionInfo *Region = getRegionInfo(ClassId);
-    if (ClassId == SizeClassMap::BatchClassId) {
-      ScopedLock L(Region->FLLock);
-      pushBatchClassBlocks(Region, Array, Size);
-      if (conditionVariableEnabled())
-        Region->FLLockCV.notifyAll(Region->FLLock);
-      return;
-    }
-
-    // TODO(chiahungduan): Consider not doing grouping if the group size is not
-    // greater than the block size with a certain scale.
-
-    bool SameGroup = true;
-    if (GroupSizeLog < RegionSizeLog) {
-      // Sort the blocks so that blocks belonging to the same group can be
-      // pushed together.
-      for (u32 I = 1; I < Size; ++I) {
-        if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
-          SameGroup = false;
-        CompactPtrT Cur = Array[I];
-        u32 J = I;
-        while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
-          Array[J] = Array[J - 1];
-          --J;
-        }
-        Array[J] = Cur;
-      }
-    }
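-    // For illustration: if the incoming compact pointers map to groups
-    // (G2, G1, G2), the insertion sort above reorders them to (G1, G2, G2) and
-    // `SameGroup` becomes false, so pushBlocksImpl() will check group ids per
-    // segment.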
-
-    {
-      ScopedLock L(Region->FLLock);
-      pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
-      if (conditionVariableEnabled())
-        Region->FLLockCV.notifyAll(Region->FLLock);
-    }
-  }
-
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    // The BatchClassId must be locked last since other classes can use it.
-    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
-      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
-        continue;
-      getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
-      getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
-    }
-    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
-    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
-    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
-    for (uptr I = 0; I < NumClasses; I++) {
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      getRegionInfo(I)->FLLock.unlock();
-      getRegionInfo(I)->MMLock.unlock();
-    }
-  }
-
-  template <typename F> void iterateOverBlocks(F Callback) {
-    for (uptr I = 0; I < NumClasses; I++) {
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      RegionInfo *Region = getRegionInfo(I);
-      // TODO: Calling `iterateOverBlocks` requires disabling the
-      // SizeClassAllocator64. We may consider locking each region on demand
-      // instead.
-      Region->FLLock.assertHeld();
-      Region->MMLock.assertHeld();
-      const uptr BlockSize = getSizeByClassId(I);
-      const uptr From = Region->RegionBeg;
-      const uptr To = From + Region->MemMapInfo.AllocatedUser;
-      for (uptr Block = From; Block < To; Block += BlockSize)
-        Callback(Block);
-    }
-  }
-
-  void getStats(ScopedString *Str) {
-    // TODO(kostyak): get the RSS per region.
-    uptr TotalMapped = 0;
-    uptr PoppedBlocks = 0;
-    uptr PushedBlocks = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
-      {
-        ScopedLock L(Region->MMLock);
-        TotalMapped += Region->MemMapInfo.MappedUser;
-      }
-      {
-        ScopedLock L(Region->FLLock);
-        PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
-        PushedBlocks += Region->FreeListInfo.PushedBlocks;
-      }
-    }
-    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
-                "allocations; remains %zu\n",
-                TotalMapped >> 20, 0U, PoppedBlocks,
-                PoppedBlocks - PushedBlocks);
-
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
-      ScopedLock L1(Region->MMLock);
-      ScopedLock L2(Region->FLLock);
-      getStats(Str, I, Region);
-    }
-  }
-
-  void getFragmentationInfo(ScopedString *Str) {
-    Str->append(
-        "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
-        getPageSizeCached());
-
-    for (uptr I = 1; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
-      ScopedLock L(Region->MMLock);
-      getRegionFragmentationInfo(Region, I, Str);
-    }
-  }
-
-  bool setOption(Option O, sptr Value) {
-    if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(Min(static_cast<s32>(Value),
-                                   Config::Primary::MaxReleaseToOsIntervalMs),
-                               Config::Primary::MinReleaseToOsIntervalMs);
-      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
-      return true;
-    }
-    // Not supported by the Primary, but not an error either.
-    return true;
-  }
-
-  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
-    RegionInfo *Region = getRegionInfo(ClassId);
-    // Note that tryLock() may fail spuriously. Given that this should happen
-    // rarely and that skipping a page release is fine, we don't take special
-    // measures to ensure the page release is done.
-    if (Region->MMLock.tryLock()) {
-      uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
-      Region->MMLock.unlock();
-      return BytesReleased;
-    }
-    return 0;
-  }
-
-  uptr releaseToOS(ReleaseToOS ReleaseType) {
-    uptr TotalReleasedBytes = 0;
-    for (uptr I = 0; I < NumClasses; I++) {
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      RegionInfo *Region = getRegionInfo(I);
-      ScopedLock L(Region->MMLock);
-      TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
-    }
-    return TotalReleasedBytes;
-  }
-
-  const char *getRegionInfoArrayAddress() const {
-    return reinterpret_cast<const char *>(RegionInfoArray);
-  }
-
-  static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
-
-  uptr getCompactPtrBaseByClassId(uptr ClassId) {
-    return getRegionInfo(ClassId)->RegionBeg;
-  }
-
-  CompactPtrT compactPtr(uptr ClassId, uptr Ptr) {
-    DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
-    return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
-  }
-
-  void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) {
-    DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
-    return reinterpret_cast<void *>(
-        decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
-  }
-
-  static BlockInfo findNearestBlock(const char *RegionInfoData,
-                                    uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
-    const RegionInfo *RegionInfoArray =
-        reinterpret_cast<const RegionInfo *>(RegionInfoData);
-
-    uptr ClassId;
-    uptr MinDistance = -1UL;
-    for (uptr I = 0; I != NumClasses; ++I) {
-      if (I == SizeClassMap::BatchClassId)
-        continue;
-      uptr Begin = RegionInfoArray[I].RegionBeg;
-      // TODO(chiahungduan): In fact, we need to lock the RegionInfo::MMLock.
-      // However, the RegionInfoData is passed with a const qualifier, and
-      // locking the mutex requires modifying RegionInfoData, which means we
-      // would need to remove the const qualifier. This may lead to another
-      // undefined behavior (the first one is accessing `AllocatedUser` without
-      // locking). It's better to pass `RegionInfoData` as `void *` so we can
-      // lock the mutex properly.
-      uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser;
-      if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
-        continue;
-      uptr RegionDistance;
-      if (Begin <= Ptr) {
-        if (Ptr < End)
-          RegionDistance = 0;
-        else
-          RegionDistance = Ptr - End;
-      } else {
-        RegionDistance = Begin - Ptr;
-      }
-
-      if (RegionDistance < MinDistance) {
-        MinDistance = RegionDistance;
-        ClassId = I;
-      }
-    }
-
-    BlockInfo B = {};
-    if (MinDistance <= 8192) {
-      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
-      B.RegionEnd =
-          B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser;
-      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
-      B.BlockBegin =
-          B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
-                               sptr(B.BlockSize));
-      while (B.BlockBegin < B.RegionBegin)
-        B.BlockBegin += B.BlockSize;
-      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
-        B.BlockBegin -= B.BlockSize;
-    }
-    return B;
-  }
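-  // Worked example (assumed values): with RegionBegin == 0x1000 and BlockSize
-  // == 48, a pointer Ptr == 0x10A0 gives (0xA0 / 48) * 48 == 0x90, so
-  // BlockBegin == 0x1090 and Ptr falls inside [0x1090, 0x10C0).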
-
-  AtomicOptions Options;
-
-private:
-  static const uptr RegionSize = 1UL << RegionSizeLog;
-  static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr PrimarySize = RegionSize * NumClasses;
-
-  static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement;
-  // Fill at most this number of batches from the newly mapped memory.
-  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
-
-  struct ReleaseToOsInfo {
-    uptr BytesInFreeListAtLastCheckpoint;
-    uptr RangesReleased;
-    uptr LastReleasedBytes;
-    u64 LastReleaseAtNs;
-  };
-
-  struct BlocksInfo {
-    SinglyLinkedList<BatchGroupT> BlockList = {};
-    uptr PoppedBlocks = 0;
-    uptr PushedBlocks = 0;
-  };
-
-  struct PagesInfo {
-    MemMapT MemMap = {};
-    // Bytes mapped for user memory.
-    uptr MappedUser = 0;
-    // Bytes allocated for user memory.
-    uptr AllocatedUser = 0;
-  };
-
-  struct UnpaddedRegionInfo {
-    // Mutex for operations on freelist
-    HybridMutex FLLock;
-    ConditionVariableT FLLockCV GUARDED_BY(FLLock);
-    // Mutex for memmap operations
-    HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
-    // `RegionBeg` is initialized before thread creation and won't be changed.
-    uptr RegionBeg = 0;
-    u32 RandState GUARDED_BY(MMLock) = 0;
-    BlocksInfo FreeListInfo GUARDED_BY(FLLock);
-    PagesInfo MemMapInfo GUARDED_BY(MMLock);
-    // The minimum size of pushed blocks to trigger page release.
-    uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
-    ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
-    bool Exhausted GUARDED_BY(MMLock) = false;
-    bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
-  };
-  struct RegionInfo : UnpaddedRegionInfo {
-    char Padding[SCUDO_CACHE_LINE_SIZE -
-                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
-  };
-  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
-
-  RegionInfo *getRegionInfo(uptr ClassId) {
-    DCHECK_LT(ClassId, NumClasses);
-    return &RegionInfoArray[ClassId];
-  }
-
-  uptr getRegionBaseByClassId(uptr ClassId) {
-    return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
-                     RegionSize) +
-           PrimaryBase;
-  }
-
-  static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
-    return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
-  }
-
-  static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) {
-    return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
-  }
-
-  static uptr compactPtrGroup(CompactPtrT CompactPtr) {
-    const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
-    return static_cast<uptr>(CompactPtr) & ~Mask;
-  }
-  static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
-    DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
-    return Base + (CompactPtrGroupBase << CompactPtrScale);
-  }
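-  // For illustration (assumed values): with CompactPtrScale == 4, a pointer at
-  // Base + 0x520 compacts to 0x52. With GroupSizeLog == 20, GroupScale == 16,
-  // so compactPtrGroup() clears the low 16 bits of a compact pointer and each
-  // group spans 2^16 compact units == 2^20 bytes == 1 MiB.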
-
-  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
-    const uptr PageSize = getPageSizeCached();
-    return BlockSize < PageSize / 16U;
-  }
-
-  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
-    const uptr PageSize = getPageSizeCached();
-    return BlockSize > PageSize;
-  }
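-  // E.g., with 4 KiB pages, "small" means a block size below 256 bytes and
-  // "large" means a block size above 4096 bytes; sizes in between are neither.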
-
-  void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
-      REQUIRES(Region->FLLock) {
-    DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
-
-    // Free blocks are recorded by TransferBatches in the freelist of every
-    // size class. In addition, TransferBatches are themselves allocated from
-    // BatchClassId. In order not to use an additional block to record the free
-    // blocks in BatchClassId, they are made self-contained, i.e., a
-    // TransferBatch records its own block address. See the figure below:
-    //
-    // TransferBatch at 0xABCD
-    // +----------------------------+
-    // | Free blocks' addr          |
-    // | +------+------+------+     |
-    // | |0xABCD|...   |...   |     |
-    // | +------+------+------+     |
-    // +----------------------------+
-    //
-    // When we allocate all the free blocks in the TransferBatch, the block
-    // used by the TransferBatch is also free for use. We don't need to recycle
-    // the TransferBatch. Note that the correctness is maintained by the
-    // invariant:
-    //
-    //   The unit of each popBatch() request is an entire TransferBatch.
-    //   Returning part of the blocks in a TransferBatch is invalid.
-    //
-    // This ensures that a TransferBatch won't leak its own address while it's
-    // still holding other valid data.
-    //
-    // Besides, the BatchGroup is also allocated from BatchClassId and has its
-    // address recorded in a TransferBatch too. To maintain correctness,
-    //
-    //   The address of the BatchGroup is always recorded in the last
-    //   TransferBatch in the freelist (which also implies that the freelist
-    //   should only be updated with push_front). Once the last TransferBatch
-    //   is popped, the block used by the BatchGroup is also free for use.
-    //
-    // With this approach, the blocks used by BatchGroup and TransferBatch are
-    // reusable and don't need additional space for them.
-
-    Region->FreeListInfo.PushedBlocks += Size;
-    BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
-
-    if (BG == nullptr) {
-      // Construct `BatchGroup` on the last element.
-      BG = reinterpret_cast<BatchGroupT *>(
-          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
-      --Size;
-      BG->Batches.clear();
-      // BatchClass doesn't have memory groups enabled. Use `0` to indicate
-      // there's no memory group here.
-      BG->CompactPtrGroupBase = 0;
-      // `BG` is also the block of BatchClassId. Note that this is different
-      // from `CreateGroup` in `pushBlocksImpl`
-      BG->PushedBlocks = 1;
-      BG->BytesInBGAtLastCheckpoint = 0;
-      BG->MaxCachedPerBatch =
-          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
-
-      Region->FreeListInfo.BlockList.push_front(BG);
-    }
-
-    if (UNLIKELY(Size == 0))
-      return;
-
-    // This happens in two cases:
-    //   1. We just allocated a new `BatchGroup`.
-    //   2. Only one block is pushed when the freelist is empty.
-    if (BG->Batches.empty()) {
-      // Construct the `TransferBatch` on the last element.
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
-          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
-      TB->clear();
-      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
-      // recorded in the TransferBatch.
-      TB->add(Array[Size - 1]);
-      TB->add(
-          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
-      --Size;
-      DCHECK_EQ(BG->PushedBlocks, 1U);
-      // `TB` is also the block of BatchClassId.
-      BG->PushedBlocks += 1;
-      BG->Batches.push_front(TB);
-    }
-
-    TransferBatchT *CurBatch = BG->Batches.front();
-    DCHECK_NE(CurBatch, nullptr);
-
-    for (u32 I = 0; I < Size;) {
-      u16 UnusedSlots =
-          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
-      if (UnusedSlots == 0) {
-        CurBatch = reinterpret_cast<TransferBatchT *>(
-            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
-        CurBatch->clear();
-        // Self-contained
-        CurBatch->add(Array[I]);
-        ++I;
-        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
-        // BatchClassId.
-        BG->Batches.push_front(CurBatch);
-        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
-      }
-      // `UnusedSlots` is u16 so the result will also fit in u16.
-      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
-      CurBatch->appendFromArray(&Array[I], AppendSize);
-      I += AppendSize;
-    }
-
-    BG->PushedBlocks += Size;
-  }
-
-  // Push the blocks to their batch group. The layout will be like,
-  //
-  // FreeListInfo.BlockList - > BG -> BG -> BG
-  //                            |     |     |
-  //                            v     v     v
-  //                            TB    TB    TB
-  //                            |
-  //                            v
-  //                            TB
-  //
-  // Each BatchGroup (BG) is associated with a unique group id and the free
-  // blocks are managed by a list of TransferBatches (TB). To reduce the time
-  // of inserting blocks, the BGs are kept sorted and the input `Array` is
-  // supposed to be sorted as well, so that maintaining the sorted property is
-  // cheap. Use `SameGroup=true` to indicate that all blocks in the array are
-  // from the same group; then we will skip checking the group id of each
-  // block.
-  void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
-                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
-      REQUIRES(Region->FLLock) {
-    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
-    DCHECK_GT(Size, 0U);
-
-    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
-      BatchGroupT *BG =
-          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
-      BG->Batches.clear();
-      TransferBatchT *TB =
-          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
-      TB->clear();
-
-      BG->CompactPtrGroupBase = CompactPtrGroupBase;
-      BG->Batches.push_front(TB);
-      BG->PushedBlocks = 0;
-      BG->BytesInBGAtLastCheckpoint = 0;
-      BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
-
-      return BG;
-    };
-
-    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
-      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
-      TransferBatchT *CurBatch = Batches.front();
-      DCHECK_NE(CurBatch, nullptr);
-
-      for (u32 I = 0; I < Size;) {
-        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
-        u16 UnusedSlots =
-            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
-        if (UnusedSlots == 0) {
-          CurBatch =
-              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
-          CurBatch->clear();
-          Batches.push_front(CurBatch);
-          UnusedSlots = BG->MaxCachedPerBatch;
-        }
-        // `UnusedSlots` is u16 so the result will also fit in u16.
-        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
-        CurBatch->appendFromArray(&Array[I], AppendSize);
-        I += AppendSize;
-      }
-
-      BG->PushedBlocks += Size;
-    };
-
-    Region->FreeListInfo.PushedBlocks += Size;
-    BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();
-
-    // In the following, `Cur` always points to the BatchGroup for blocks that
-    // will be pushed next. `Prev` is the element right before `Cur`.
-    BatchGroupT *Prev = nullptr;
-
-    while (Cur != nullptr &&
-           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
-      Prev = Cur;
-      Cur = Cur->Next;
-    }
-
-    if (Cur == nullptr ||
-        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
-      Cur = CreateGroup(compactPtrGroup(Array[0]));
-      if (Prev == nullptr)
-        Region->FreeListInfo.BlockList.push_front(Cur);
-      else
-        Region->FreeListInfo.BlockList.insert(Prev, Cur);
-    }
-
-    // All the blocks are from the same group, just push without checking group
-    // id.
-    if (SameGroup) {
-      for (u32 I = 0; I < Size; ++I)
-        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
-
-      InsertBlocks(Cur, Array, Size);
-      return;
-    }
-
-    // The blocks are sorted by group id. Determine the segment of group and
-    // push them to their group together.
-    u32 Count = 1;
-    for (u32 I = 1; I < Size; ++I) {
-      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
-        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
-        InsertBlocks(Cur, Array + I - Count, Count);
-
-        while (Cur != nullptr &&
-               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
-          Prev = Cur;
-          Cur = Cur->Next;
-        }
-
-        if (Cur == nullptr ||
-            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
-          Cur = CreateGroup(compactPtrGroup(Array[I]));
-          DCHECK_NE(Prev, nullptr);
-          Region->FreeListInfo.BlockList.insert(Prev, Cur);
-        }
-
-        Count = 1;
-      } else {
-        ++Count;
-      }
-    }
-
-    InsertBlocks(Cur, Array + Size - Count, Count);
-  }
-
-  TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
-                                 bool &ReportRegionExhausted) {
-    TransferBatchT *B = nullptr;
-
-    while (true) {
-      // We only expect one thread to be refilling the freelist; the other
-      // threads will be waiting for either the completion of
-      // `populateFreeListAndPopBatch()` or a `pushBlocks()` call from another
-      // thread.
-      bool PopulateFreeList = false;
-      {
-        ScopedLock FL(Region->FLLock);
-        if (!Region->isPopulatingFreeList) {
-          Region->isPopulatingFreeList = true;
-          PopulateFreeList = true;
-        }
-      }
-
-      if (PopulateFreeList) {
-        ScopedLock ML(Region->MMLock);
-
-        const bool RegionIsExhausted = Region->Exhausted;
-        if (!RegionIsExhausted)
-          B = populateFreeListAndPopBatch(C, ClassId, Region);
-        ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
-
-        {
-          // Before reacquiring the `FLLock`, the freelist may be used up again
-          // and some threads are waiting for the freelist refill by the
-          // current thread. It's important to set
-          // `Region->isPopulatingFreeList` to false so the threads about to
-          // sleep will notice the status change.
-          ScopedLock FL(Region->FLLock);
-          Region->isPopulatingFreeList = false;
-          Region->FLLockCV.notifyAll(Region->FLLock);
-        }
-
-        break;
-      }
-
-      // At this point, two preconditions have to be met before waiting:
-      //   1. The freelist is empty.
-      //   2. Region->isPopulatingFreeList == true, i.e., someone is still
-      //   doing `populateFreeListAndPopBatch()`.
-      //
-      // Note that there's a chance that the freelist is empty while
-      // Region->isPopulatingFreeList == false, because all the newly populated
-      // blocks were used up right after the refill. Therefore, we have to
-      // check if someone is still populating the freelist.
-      ScopedLock FL(Region->FLLock);
-      if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
-        break;
-
-      if (!Region->isPopulatingFreeList)
-        continue;
-
-      // Now the freelist is empty and someone is doing the refill. We will
-      // wait until either the freelist is refilled or someone finishes
-      // `populateFreeListAndPopBatch()`. The refill can be done by
-      // `populateFreeListAndPopBatch()`, `pushBlocks()`,
-      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
-      Region->FLLockCV.wait(Region->FLLock);
-
-      if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
-        break;
-    }
-
-    return B;
-  }
-
-  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
-  // group id will be considered first.
-  //
-  // The region mutex needs to be held while calling this method.
-  TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
-      REQUIRES(Region->FLLock) {
-    if (Region->FreeListInfo.BlockList.empty())
-      return nullptr;
-
-    SinglyLinkedList<TransferBatchT> &Batches =
-        Region->FreeListInfo.BlockList.front()->Batches;
-
-    if (Batches.empty()) {
-      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
-      BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
-      Region->FreeListInfo.BlockList.pop_front();
-
-      // Block used by `BatchGroup` is from BatchClassId. Turn the block into
-      // `TransferBatch` with single block.
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
-      TB->clear();
-      TB->add(
-          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
-      Region->FreeListInfo.PoppedBlocks += 1;
-      return TB;
-    }
-
-    TransferBatchT *B = Batches.front();
-    Batches.pop_front();
-    DCHECK_NE(B, nullptr);
-    DCHECK_GT(B->getCount(), 0U);
-
-    if (Batches.empty()) {
-      BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
-      Region->FreeListInfo.BlockList.pop_front();
-
-      // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
-      // while allocating. Note that the block used to construct the BatchGroup
-      // is recorded as a free block in the last element of BatchGroup::Batches,
-      // which means that once we pop the last TransferBatch, the block is
-      // implicitly deallocated.
-      if (ClassId != SizeClassMap::BatchClassId)
-        C->deallocate(SizeClassMap::BatchClassId, BG);
-    }
-
-    Region->FreeListInfo.PoppedBlocks += B->getCount();
-
-    return B;
-  }
-
-  // Refill the freelist and return one batch.
-  NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
-                                                       RegionInfo *Region)
-      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
-    const uptr Size = getSizeByClassId(ClassId);
-    const u16 MaxCount = CacheT::getMaxCached(Size);
-
-    const uptr RegionBeg = Region->RegionBeg;
-    const uptr MappedUser = Region->MemMapInfo.MappedUser;
-    const uptr TotalUserBytes =
-        Region->MemMapInfo.AllocatedUser + MaxCount * Size;
-    // Map more space for blocks, if necessary.
-    if (TotalUserBytes > MappedUser) {
-      // Do the mmap for the user memory.
-      const uptr MapSize =
-          roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
-      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
-      if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
-        Region->Exhausted = true;
-        return nullptr;
-      }
-
-      if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
-              RegionBeg + MappedUser, MapSize, "scudo:primary",
-              MAP_ALLOWNOMEM | MAP_RESIZABLE |
-                  (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
-                                                            : 0)))) {
-        return nullptr;
-      }
-      Region->MemMapInfo.MappedUser += MapSize;
-      C->getStats().add(StatMapped, MapSize);
-    }
-
-    const u32 NumberOfBlocks =
-        Min(MaxNumBatches * MaxCount,
-            static_cast<u32>((Region->MemMapInfo.MappedUser -
-                              Region->MemMapInfo.AllocatedUser) /
-                             Size));
-    DCHECK_GT(NumberOfBlocks, 0);
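-    // Worked example (assumed values): with MaxNumBatches == 8, MaxCount == 13,
-    // Size == 32 and 64 KiB of mapped-but-unallocated space, this yields
-    // Min(8 * 13, 65536 / 32) == Min(104, 2048) == 104 blocks.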
-
-    constexpr u32 ShuffleArraySize =
-        MaxNumBatches * TransferBatchT::MaxNumCached;
-    CompactPtrT ShuffleArray[ShuffleArraySize];
-    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
-
-    const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
-    uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
-    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
-      ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
-
-    ScopedLock L(Region->FLLock);
-
-    if (ClassId != SizeClassMap::BatchClassId) {
-      u32 N = 1;
-      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
-      for (u32 I = 1; I < NumberOfBlocks; I++) {
-        if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
-          shuffle(ShuffleArray + I - N, N, &Region->RandState);
-          pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
-                         /*SameGroup=*/true);
-          N = 1;
-          CurGroup = compactPtrGroup(ShuffleArray[I]);
-        } else {
-          ++N;
-        }
-      }
-
-      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
-      pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
-                     /*SameGroup=*/true);
-    } else {
-      pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
-    }
-
-    TransferBatchT *B = popBatchImpl(C, ClassId, Region);
-    DCHECK_NE(B, nullptr);
-
-    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
-    // the requests from `pushBlocks()` and `popBatch()`, which are the external
-    // interfaces. `populateFreeListAndPopBatch()` is an internal interface, so
-    // we revert the value here to avoid skewing the stats.
-    Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;
-
-    const uptr AllocatedUser = Size * NumberOfBlocks;
-    C->getStats().add(StatFree, AllocatedUser);
-    Region->MemMapInfo.AllocatedUser += AllocatedUser;
-
-    return B;
-  }
-
-  void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
-      REQUIRES(Region->MMLock, Region->FLLock) {
-    if (Region->MemMapInfo.MappedUser == 0)
-      return;
-    const uptr BlockSize = getSizeByClassId(ClassId);
-    const uptr InUseBlocks =
-        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
-    const uptr BytesInFreeList =
-        Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
-    uptr RegionPushedBytesDelta = 0;
-    if (BytesInFreeList >=
-        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
-      RegionPushedBytesDelta =
-          BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
-    }
-    const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
-    Str->append(
-        "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
-        "inuse: %6zu total: %6zu releases: %6zu last "
-        "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
-        Region->Exhausted ? "E" : " ", ClassId, getSizeByClassId(ClassId),
-        Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
-        Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
-        Region->ReleaseInfo.RangesReleased,
-        Region->ReleaseInfo.LastReleasedBytes >> 10,
-        RegionPushedBytesDelta >> 10, Region->RegionBeg,
-        getRegionBaseByClassId(ClassId));
-  }
-
-  void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId,
-                                  ScopedString *Str) REQUIRES(Region->MMLock) {
-    const uptr BlockSize = getSizeByClassId(ClassId);
-    const uptr AllocatedUserEnd =
-        Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
-
-    SinglyLinkedList<BatchGroupT> GroupsToRelease;
-    {
-      ScopedLock L(Region->FLLock);
-      GroupsToRelease = Region->FreeListInfo.BlockList;
-      Region->FreeListInfo.BlockList.clear();
-    }
-
-    FragmentationRecorder Recorder;
-    if (!GroupsToRelease.empty()) {
-      PageReleaseContext Context =
-          markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
-                         getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
-      auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
-      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-
-      mergeGroupsToReleaseBack(Region, GroupsToRelease);
-    }
-
-    ScopedLock L(Region->FLLock);
-    const uptr PageSize = getPageSizeCached();
-    const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
-    const uptr InUseBlocks =
-        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
-    const uptr AllocatedPagesCount =
-        roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
-    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
-    const uptr InUsePages =
-        AllocatedPagesCount - Recorder.getReleasedPagesCount();
-    const uptr InUseBytes = InUsePages * PageSize;
-
-    uptr Integral;
-    uptr Fractional;
-    computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
-                      &Fractional);
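-    // For illustration (assumed values): 100 in-use 32-byte blocks spread over
-    // 2 in-use 4 KiB pages give 3200 / 8192 bytes, i.e. a utilization of about
-    // 39.06%.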
-    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
-                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
-                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
-                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
-  }
-
-  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
-                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
-      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
-    const uptr BlockSize = getSizeByClassId(ClassId);
-    uptr BytesInFreeList;
-    const uptr AllocatedUserEnd =
-        Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
-    SinglyLinkedList<BatchGroupT> GroupsToRelease;
-
-    {
-      ScopedLock L(Region->FLLock);
-
-      BytesInFreeList = Region->MemMapInfo.AllocatedUser -
-                        (Region->FreeListInfo.PoppedBlocks -
-                         Region->FreeListInfo.PushedBlocks) *
-                            BlockSize;
-      if (UNLIKELY(BytesInFreeList == 0))
-        return 0;
-
-      // ==================================================================== //
-      // 1. Check if we have enough free blocks and if it's worth doing a page
-      //    release.
-      // ==================================================================== //
-      if (ReleaseType != ReleaseToOS::ForceAll &&
-          !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
-                                   ReleaseType)) {
-        return 0;
-      }
-
-      // ==================================================================== //
-      // 2. Determine which groups can release the pages. Use a heuristic to
-      //    gather groups that are candidates for doing a release.
-      // ==================================================================== //
-      if (ReleaseType == ReleaseToOS::ForceAll) {
-        GroupsToRelease = Region->FreeListInfo.BlockList;
-        Region->FreeListInfo.BlockList.clear();
-      } else {
-        GroupsToRelease =
-            collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
-                                   getCompactPtrBaseByClassId(ClassId));
-      }
-      if (GroupsToRelease.empty())
-        return 0;
-    }
-
-    // Note that we have extracted the `GroupsToRelease` from region freelist.
-    // It's safe to let pushBlocks()/popBatches() access the remaining region
-    // freelist. In the steps 3 and 4, we will temporarily release the FLLock
-    // and lock it again before step 5.
-
-    // ==================================================================== //
-    // 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
-    //    Then we can tell which pages are in-use by querying
-    //    `PageReleaseContext`.
-    // ==================================================================== //
-    PageReleaseContext Context =
-        markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
-                       getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
-    if (UNLIKELY(!Context.hasBlockMarked())) {
-      mergeGroupsToReleaseBack(Region, GroupsToRelease);
-      return 0;
-    }
-
-    // ==================================================================== //
-    // 4. Release the unused physical pages back to the OS.
-    // ==================================================================== //
-    RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
-                                            Region->RegionBeg,
-                                            Context.getReleaseOffset());
-    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
-    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-    if (Recorder.getReleasedRangesCount() > 0) {
-      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
-      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
-      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
-    }
-    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
-
-    // ====================================================================== //
-    // 5. Merge the `GroupsToRelease` back to the freelist.
-    // ====================================================================== //
-    mergeGroupsToReleaseBack(Region, GroupsToRelease);
-
-    return Recorder.getReleasedBytes();
-  }
-
-  bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
-                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
-      REQUIRES(Region->MMLock, Region->FLLock) {
-    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
-              Region->FreeListInfo.PushedBlocks);
-    const uptr PageSize = getPageSizeCached();
-
-    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
-    // so that we won't underestimate the releasable pages. For example, the
-    // following is the region usage,
-    //
-    //  BytesInFreeListAtLastCheckpoint   AllocatedUser
-    //                v                         v
-    //  |--------------------------------------->
-    //         ^                   ^
-    //  BytesInFreeList     ReleaseThreshold
-    //
-    // In general, if we have collected enough bytes and the amount of free
-    // bytes meets the ReleaseThreshold, we will try to do page release. If we
-    // don't update `BytesInFreeListAtLastCheckpoint` when the current
-    // `BytesInFreeList` is smaller, we may wait longer for enough freed
-    // blocks because we miss counting the bytes between
-    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
-    if (BytesInFreeList <=
-        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
-      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
-    }
-
-    const uptr RegionPushedBytesDelta =
-        BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
-    if (RegionPushedBytesDelta < PageSize)
-      return false;
-
-    // Releasing smaller blocks is expensive, so we want to make sure that a
-    // significant amount of bytes are free, and that there has been a good
-    // amount of batches pushed to the freelist before attempting to release.
-    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
-      if (RegionPushedBytesDelta < Region->TryReleaseThreshold)
-        return false;
-
-    if (ReleaseType == ReleaseToOS::Normal) {
-      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-      if (IntervalMs < 0)
-        return false;
-
-      // The constant 8 here is selected from profiling some apps: the number
-      // of unreleased pages in the large size classes is around 16 pages or
-      // more. We choose half of that as a heuristic, which also avoids a page
-      // release on every pushBlocks() attempt by large blocks.
-      const bool ByPassReleaseInterval =
-          isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize;
-      if (!ByPassReleaseInterval) {
-        if (Region->ReleaseInfo.LastReleaseAtNs +
-                static_cast<u64>(IntervalMs) * 1000000 >
-            getMonotonicTimeFast()) {
-          // Memory was returned recently.
-          return false;
-        }
-      }
-    } // if (ReleaseType == ReleaseToOS::Normal)
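-    // For illustration (assumed value): with IntervalMs == 1000, a region
-    // releases at most once per second (1000 * 1000000 ns), unless a large
-    // size class has pushed more than 8 pages' worth of bytes and bypasses the
-    // interval.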
-
-    return true;
-  }
-
-  SinglyLinkedList<BatchGroupT>
-  collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
-                         const uptr AllocatedUserEnd, const uptr CompactPtrBase)
-      REQUIRES(Region->MMLock, Region->FLLock) {
-    const uptr GroupSize = (1UL << GroupSizeLog);
-    const uptr PageSize = getPageSizeCached();
-    SinglyLinkedList<BatchGroupT> GroupsToRelease;
-
-    // We examine each group and take the minimum distance to the release
-    // threshold as the next `Region->TryReleaseThreshold`. Note that if
-    // the size of free blocks has reached the release threshold, the distance
-    // to the next release will be PageSize * SmallerBlockReleasePageDelta. See
-    // the comment on `SmallerBlockReleasePageDelta` for more details.
-    uptr MinDistToThreshold = GroupSize;
-
-    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
-                     *Prev = nullptr;
-         BG != nullptr;) {
-      // Group boundary is always GroupSize-aligned from CompactPtr base. The
-      // layout of memory groups is like,
-      //
-      //     (CompactPtrBase)
-      // #1 CompactPtrGroupBase   #2 CompactPtrGroupBase            ...
-      //           |                       |                       |
-      //           v                       v                       v
-      //           +-----------------------+-----------------------+
-      //            \                     / \                     /
-      //             ---   GroupSize   ---   ---   GroupSize   ---
-      //
-      // After decompacting the CompactPtrGroupBase, we expect the alignment
-      // property is held as well.
-      const uptr BatchGroupBase =
-          decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
-      DCHECK_LE(Region->RegionBeg, BatchGroupBase);
-      DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
-      DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
-      // TransferBatches are pushed in front of BG.Batches. The first one may
-      // not have all of its slots used.
-      const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
-                             BG->Batches.front()->getCount();
-      const uptr BytesInBG = NumBlocks * BlockSize;
-
-      if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
-        BG->BytesInBGAtLastCheckpoint = BytesInBG;
-        Prev = BG;
-        BG = BG->Next;
-        continue;
-      }
-
-      // We only reach here when BytesInBG > BytesInBGAtLastCheckpoint, so the
-      // delta below cannot underflow.
-      const uptr PushedBytesDelta = BytesInBG - BG->BytesInBGAtLastCheckpoint;
-
-      // Given the randomness property, we try to release the pages only if the
-      // bytes used by free blocks exceed a certain proportion of the group
-      // size. Note that this heuristic only applies when all the spaces in a
-      // BatchGroup are allocated.
-      if (isSmallBlock(BlockSize)) {
-        const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
-        const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                            ? GroupSize
-                                            : AllocatedUserEnd - BatchGroupBase;
-        const uptr ReleaseThreshold =
-            (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
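-        // E.g., BlockSize == 32 gives (100 - 1 - 2)% == 97% of the allocated
-        // group size, matching the 97% figure in init(); BlockSize == 256
-        // gives (100 - 1 - 16)% == 83%.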
-        const bool HighDensity = BytesInBG >= ReleaseThreshold;
-        const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
-        // If all blocks in the group are released, we will do range marking
-        // which is fast. Otherwise, we will wait until we have accumulated
-        // a certain amount of free memory.
-        const bool ReachReleaseDelta =
-            MayHaveReleasedAll
-                ? true
-                : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;
-
-        if (!HighDensity) {
-          DCHECK_LE(BytesInBG, ReleaseThreshold);
-          // The following is the usage of a memory group,
-          //
-          //     BytesInBG             ReleaseThreshold
-          //  /             \                 v
-          //  +---+---------------------------+-----+
-          //  |   |         |                 |     |
-          //  +---+---------------------------+-----+
-          //       \        /                       ^
-          //    PushedBytesDelta                 GroupEnd
-          MinDistToThreshold =
-              Min(MinDistToThreshold,
-                  ReleaseThreshold - BytesInBG + PushedBytesDelta);
-        } else {
-          // If the group reaches high density at this round, the next release
-          // attempt will be based on SmallerBlockReleasePageDelta.
-          MinDistToThreshold =
-              Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
-        }
-
-        if (!HighDensity || !ReachReleaseDelta) {
-          Prev = BG;
-          BG = BG->Next;
-          continue;
-        }
-      }
-
-      // If `BG` is the first BatchGroupT in the list, we only need to advance
-      // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
-      // for `Prev`.
-      //
-      //         (BG)   (BG->Next)
-      // Prev     Cur      BG
-      //   |       |       |
-      //   v       v       v
-      //  nil     +--+    +--+
-      //          |X | -> |  | -> ...
-      //          +--+    +--+
-      //
-      // Otherwise, `Prev` will be used to extract the `Cur` from the
-      // `FreeListInfo.BlockList`.
-      //
-      //         (BG)   (BG->Next)
-      // Prev     Cur      BG
-      //   |       |       |
-      //   v       v       v
-      //  +--+    +--+    +--+
-      //  |  | -> |X | -> |  | -> ...
-      //  +--+    +--+    +--+
-      //
-      // After FreeListInfo.BlockList::extract(),
-      //
-      // Prev     Cur       BG
-      //   |       |        |
-      //   v       v        v
-      //  +--+    +--+     +--+
-      //  |  |-+  |X |  +->|  | -> ...
-      //  +--+ |  +--+  |  +--+
-      //       +--------+
-      //
-      // Note that we need to advance before pushing this BatchGroup to
-      // GroupsToRelease because it's a destructive operation.
-
-      BatchGroupT *Cur = BG;
-      BG = BG->Next;
-
-      // Ideally, we may want to update this only after successful release.
-      // However, for smaller blocks, each block marking is a costly operation.
-      // Therefore, we update it earlier.
-      // TODO: Consider updating this after releasing pages if `ReleaseRecorder`
-      // can tell the released bytes in each group.
-      Cur->BytesInBGAtLastCheckpoint = BytesInBG;
-
-      if (Prev != nullptr)
-        Region->FreeListInfo.BlockList.extract(Prev, Cur);
-      else
-        Region->FreeListInfo.BlockList.pop_front();
-      GroupsToRelease.push_back(Cur);
-    }
-
-    // Only small blocks have the adaptive `TryReleaseThreshold`.
-    if (isSmallBlock(BlockSize)) {
-      // If MinDistToThreshold was not updated, that means each memory group
-      // may have pushed less than a page size worth of blocks. In that case,
-      // just set it back to normal.
-      if (MinDistToThreshold == GroupSize)
-        MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
-      Region->TryReleaseThreshold = MinDistToThreshold;
-    }
-
-    return GroupsToRelease;
-  }
-
-  PageReleaseContext
-  markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
-                 const uptr AllocatedUserEnd, const uptr CompactPtrBase,
-                 SinglyLinkedList<BatchGroupT> &GroupsToRelease)
-      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
-    const uptr GroupSize = (1UL << GroupSizeLog);
-    auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
-      return decompactPtrInternal(CompactPtrBase, CompactPtr);
-    };
-
-    const uptr ReleaseBase = decompactGroupBase(
-        CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
-    const uptr LastGroupEnd =
-        Min(decompactGroupBase(CompactPtrBase,
-                               GroupsToRelease.back()->CompactPtrGroupBase) +
-                GroupSize,
-            AllocatedUserEnd);
-    // The last block may straddle the group boundary. Round up to BlockSize to
-    // get the exact range.
-    const uptr ReleaseEnd =
-        roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
-        Region->RegionBeg;
-    const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
-    const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
-
-    PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                               ReleaseRangeSize, ReleaseOffset);
-    // We may not be able to do the page release in the rare case that the
-    // PageMap allocation fails.
-    if (UNLIKELY(!Context.ensurePageMapAllocated()))
-      return Context;
-
-    for (BatchGroupT &BG : GroupsToRelease) {
-      const uptr BatchGroupBase =
-          decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
-      const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
-      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? GroupSize
-                                          : AllocatedUserEnd - BatchGroupBase;
-      const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
-      const bool MayContainLastBlockInRegion =
-          BatchGroupUsedEnd == AllocatedUserEnd;
-      const bool BlockAlignedWithUsedEnd =
-          (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
-
-      uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
-      if (!BlockAlignedWithUsedEnd)
-        ++MaxContainedBlocks;
-
-      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.front()->getCount();
-
-      if (NumBlocks == MaxContainedBlocks) {
-        for (const auto &It : BG.Batches) {
-          if (&It != BG.Batches.front())
-            DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
-          for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
-        }
-
-        Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
-                                      Region->RegionBeg, /*RegionIndex=*/0,
-                                      Region->MemMapInfo.AllocatedUser);
-      } else {
-        DCHECK_LT(NumBlocks, MaxContainedBlocks);
-        // Note that we don't always visit the blocks in each BatchGroup, so
-        // we may miss the chance of releasing certain pages that cross
-        // BatchGroups.
-        Context.markFreeBlocksInRegion(
-            BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
-            Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
-      }
-    }
-
-    DCHECK(Context.hasBlockMarked());
-
-    return Context;
-  }
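-
-  // Editorial sketch (hypothetical, not part of the upstream file): the block
-  // count used above. Within a BatchGroup, only the front TransferBatch may
-  // be partially filled; every other batch holds exactly MaxCachedPerBatch
-  // blocks.
-  static uptr countBlocksInBatchGroup(uptr NumBatches, uptr MaxCachedPerBatch,
-                                      uptr FrontBatchCount) {
-    // E.g. 3 batches with MaxCachedPerBatch = 13 and 5 blocks in the front
-    // batch: 2 * 13 + 5 = 31 blocks in total.
-    return (NumBatches - 1) * MaxCachedPerBatch + FrontBatchCount;
-  }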
-
-  void mergeGroupsToReleaseBack(RegionInfo *Region,
-                                SinglyLinkedList<BatchGroupT> &GroupsToRelease)
-      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
-    ScopedLock L(Region->FLLock);
-
-    // After merging two freelists, we may have redundant `BatchGroup`s that
-    // need to be recycled. The number of unused `BatchGroup`s is expected to
-    // be small; the constant below was inferred from real programs.
-    constexpr uptr MaxUnusedSize = 8;
-    CompactPtrT Blocks[MaxUnusedSize];
-    u32 Idx = 0;
-    RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
-    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
-    // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
-    // should just push it back to the freelist when we merge two `BatchGroup`s.
-    // This logic hasn't been implemented because we don't support releasing
-    // pages in `BatchClassRegion` yet.
-    DCHECK_NE(BatchClassRegion, Region);
-
-    // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
-    // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
-    // sorted.
-    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
-                     *Prev = nullptr;
-         ;) {
-      if (BG == nullptr || GroupsToRelease.empty()) {
-        if (!GroupsToRelease.empty())
-          Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
-        break;
-      }
-
-      DCHECK(!BG->Batches.empty());
-
-      if (BG->CompactPtrGroupBase <
-          GroupsToRelease.front()->CompactPtrGroupBase) {
-        Prev = BG;
-        BG = BG->Next;
-        continue;
-      }
-
-      BatchGroupT *Cur = GroupsToRelease.front();
-      TransferBatchT *UnusedTransferBatch = nullptr;
-      GroupsToRelease.pop_front();
-
-      if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
-        BG->PushedBlocks += Cur->PushedBlocks;
-        // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
-        // collecting the `GroupsToRelease`.
-        BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
-        const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;
-
-        // Note that the first TransferBatch in each of the two `Batches`
-        // lists may not be full, and only a group's first TransferBatch may
-        // hold a non-full count. Thus we have to merge them before appending
-        // one list to the other.
-        if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
-          BG->Batches.append_back(&Cur->Batches);
-        } else {
-          TransferBatchT *NonFullBatch = Cur->Batches.front();
-          Cur->Batches.pop_front();
-          const u16 NonFullBatchCount = NonFullBatch->getCount();
-          // The remaining Batches in `Cur` are full.
-          BG->Batches.append_back(&Cur->Batches);
-
-          if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
-            // Only 1 non-full TransferBatch, push it to the front.
-            BG->Batches.push_front(NonFullBatch);
-          } else {
-            const u16 NumBlocksToMove = static_cast<u16>(
-                Min(static_cast<u16>(MaxCachedPerBatch -
-                                     BG->Batches.front()->getCount()),
-                    NonFullBatchCount));
-            BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
-                                                         NumBlocksToMove);
-            if (NonFullBatch->isEmpty())
-              UnusedTransferBatch = NonFullBatch;
-            else
-              BG->Batches.push_front(NonFullBatch);
-          }
-        }
-
-        const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
-        if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
-          ScopedLock L(BatchClassRegion->FLLock);
-          pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
-          if (conditionVariableEnabled())
-            BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
-          Idx = 0;
-        }
-        Blocks[Idx++] =
-            compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
-        if (UnusedTransferBatch) {
-          Blocks[Idx++] =
-              compactPtr(SizeClassMap::BatchClassId,
-                         reinterpret_cast<uptr>(UnusedTransferBatch));
-        }
-        Prev = BG;
-        BG = BG->Next;
-        continue;
-      }
-
-      // At this point, `BG` is the first BatchGroup whose CompactPtrGroupBase
-      // is larger than the first element in `GroupsToRelease`. We need to
-      // insert `GroupsToRelease::front()` (which is `Cur` below) before `BG`.
-      //
-      //   1. If `Prev` is nullptr, we simply push `Cur` to the front of
-      //      FreeListInfo.BlockList.
-      //   2. Otherwise, use `insert()` which inserts an element next to `Prev`.
-      //
-      // Afterwards, we don't need to advance `BG` because the order between
-      // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
-      if (Prev == nullptr)
-        Region->FreeListInfo.BlockList.push_front(Cur);
-      else
-        Region->FreeListInfo.BlockList.insert(Prev, Cur);
-      DCHECK_EQ(Cur->Next, BG);
-      Prev = Cur;
-    }
-
-    if (Idx != 0) {
-      ScopedLock L(BatchClassRegion->FLLock);
-      pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
-      if (conditionVariableEnabled())
-        BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
-    }
-
-    if (SCUDO_DEBUG) {
-      BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
-      for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
-           Prev = Cur, Cur = Cur->Next) {
-        CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
-      }
-    }
-
-    if (conditionVariableEnabled())
-      Region->FLLockCV.notifyAll(Region->FLLock);
-  }
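-
-  // Editorial sketch (standalone and hypothetical, not part of the upstream
-  // file): the shape of the sorted-merge above, reduced to plain nodes keyed
-  // by an integer. Nodes from `Incoming` are spliced in front of the first
-  // node with a larger key; after a splice, `BG` is intentionally not
-  // advanced because its order relative to the next incoming node is still
-  // unchecked.
-  struct SketchNode {
-    uptr Key;
-    SketchNode *Next;
-  };
-  static SketchNode *spliceSortedSketch(SketchNode *List,
-                                        SketchNode *Incoming) {
-    SketchNode *Prev = nullptr;
-    SketchNode *BG = List;
-    while (Incoming != nullptr) {
-      if (BG != nullptr && BG->Key < Incoming->Key) {
-        Prev = BG;
-        BG = BG->Next;
-        continue;
-      }
-      SketchNode *Cur = Incoming;
-      Incoming = Incoming->Next;
-      Cur->Next = BG;
-      if (Prev == nullptr)
-        List = Cur;
-      else
-        Prev->Next = Cur;
-      Prev = Cur;
-    }
-    return List;
-  }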
-
-  // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
-  // deprecated.
-  uptr PrimaryBase = 0;
-  ReservedMemoryT ReservedMemory = {};
-  // The minimum size of pushed blocks that we will try to release the pages in
-  // that size class.
-  uptr SmallerBlockReleasePageDelta = 0;
-  atomic_s32 ReleaseToOsIntervalMs = {};
-  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
-};
-
-} // namespace scudo
-
-#endif // SCUDO_PRIMARY64_H_
diff --git a/Telegram/ThirdParty/scudo/quarantine.h b/Telegram/ThirdParty/scudo/quarantine.h
deleted file mode 100644
index b5f8db0e8..000000000
--- a/Telegram/ThirdParty/scudo/quarantine.h
+++ /dev/null
@@ -1,309 +0,0 @@
-//===-- quarantine.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_QUARANTINE_H_
-#define SCUDO_QUARANTINE_H_
-
-#include "list.h"
-#include "mutex.h"
-#include "string_utils.h"
-#include "thread_annotations.h"
-
-namespace scudo {
-
-struct QuarantineBatch {
-  // With the following count, a batch (and the header that protects it)
-  // occupies 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
-  static const u32 MaxCount = 1019;
-  QuarantineBatch *Next;
-  uptr Size;
-  u32 Count;
-  void *Batch[MaxCount];
-
-  void init(void *Ptr, uptr Size) {
-    Count = 1;
-    Batch[0] = Ptr;
-    this->Size = Size + sizeof(QuarantineBatch); // Account for the batch header.
-  }
-
-  // The total size of quarantined nodes recorded in this batch.
-  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
-
-  void push_back(void *Ptr, uptr Size) {
-    DCHECK_LT(Count, MaxCount);
-    Batch[Count++] = Ptr;
-    this->Size += Size;
-  }
-
-  bool canMerge(const QuarantineBatch *const From) const {
-    return Count + From->Count <= MaxCount;
-  }
-
-  void merge(QuarantineBatch *const From) {
-    DCHECK_LE(Count + From->Count, MaxCount);
-    DCHECK_GE(Size, sizeof(QuarantineBatch));
-
-    for (uptr I = 0; I < From->Count; ++I)
-      Batch[Count + I] = From->Batch[I];
-    Count += From->Count;
-    Size += From->getQuarantinedSize();
-
-    From->Count = 0;
-    From->Size = sizeof(QuarantineBatch);
-  }
-
-  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
-};
-
-static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8 KiB.
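-
-// Editorial sketch (hypothetical, not part of the upstream file): the size
-// bookkeeping above in miniature. `Size` always includes the batch header
-// itself, so the quarantined payload is `Size - sizeof(QuarantineBatch)`.
-inline void quarantineBatchAccountingSketch() {
-  int Dummy0 = 0, Dummy1 = 0;
-  QuarantineBatch B;
-  B.init(&Dummy0, 100);     // Size == 100 + sizeof(QuarantineBatch).
-  B.push_back(&Dummy1, 28); // Size == 128 + sizeof(QuarantineBatch).
-  DCHECK_EQ(B.getQuarantinedSize(), 128U);
-}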
-
-// Per-thread cache of memory blocks.
-template <typename Callback> class QuarantineCache {
-public:
-  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }
-
-  // Total memory used, including internal accounting.
-  uptr getSize() const { return atomic_load_relaxed(&Size); }
-  // Memory used for internal accounting.
-  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
-
-  void enqueue(Callback Cb, void *Ptr, uptr Size) {
-    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
-      QuarantineBatch *B =
-          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
-      DCHECK(B);
-      B->init(Ptr, Size);
-      enqueueBatch(B);
-    } else {
-      List.back()->push_back(Ptr, Size);
-      addToSize(Size);
-    }
-  }
-
-  void transfer(QuarantineCache *From) {
-    List.append_back(&From->List);
-    addToSize(From->getSize());
-    atomic_store_relaxed(&From->Size, 0);
-  }
-
-  void enqueueBatch(QuarantineBatch *B) {
-    List.push_back(B);
-    addToSize(B->Size);
-  }
-
-  QuarantineBatch *dequeueBatch() {
-    if (List.empty())
-      return nullptr;
-    QuarantineBatch *B = List.front();
-    List.pop_front();
-    subFromSize(B->Size);
-    return B;
-  }
-
-  void mergeBatches(QuarantineCache *ToDeallocate) {
-    uptr ExtractedSize = 0;
-    QuarantineBatch *Current = List.front();
-    while (Current && Current->Next) {
-      if (Current->canMerge(Current->Next)) {
-        QuarantineBatch *Extracted = Current->Next;
-        // Move all the chunks into the current batch.
-        Current->merge(Extracted);
-        DCHECK_EQ(Extracted->Count, 0);
-        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
-        // Remove the next batch from the list and account for its size.
-        List.extract(Current, Extracted);
-        ExtractedSize += Extracted->Size;
-        // Add it to deallocation list.
-        ToDeallocate->enqueueBatch(Extracted);
-      } else {
-        Current = Current->Next;
-      }
-    }
-    subFromSize(ExtractedSize);
-  }
-
-  void getStats(ScopedString *Str) const {
-    uptr BatchCount = 0;
-    uptr TotalOverheadBytes = 0;
-    uptr TotalBytes = 0;
-    uptr TotalQuarantineChunks = 0;
-    for (const QuarantineBatch &Batch : List) {
-      BatchCount++;
-      TotalBytes += Batch.Size;
-      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
-      TotalQuarantineChunks += Batch.Count;
-    }
-    const uptr QuarantineChunksCapacity =
-        BatchCount * QuarantineBatch::MaxCount;
-    const uptr ChunksUsagePercent =
-        (QuarantineChunksCapacity == 0)
-            ? 0
-            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
-    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
-    const uptr MemoryOverheadPercent =
-        (TotalQuarantinedBytes == 0)
-            ? 0
-            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
-    Str->append(
-        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
-        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
-        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
-        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
-  }
-
-private:
-  SinglyLinkedList<QuarantineBatch> List;
-  atomic_uptr Size = {};
-
-  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
-  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
-};
-
-// The callback interface is:
-// void Callback::recycle(Node *Ptr);
-// void *Callback::allocate(uptr Size);
-// void Callback::deallocate(void *Ptr);
-template <typename Callback, typename Node> class GlobalQuarantine {
-public:
-  typedef QuarantineCache<Callback> CacheT;
-  using ThisT = GlobalQuarantine<Callback, Node>;
-
-  void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
-    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
-    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
-    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
-    // Thread local quarantine size can be zero only when global quarantine size
-    // is zero (it allows us to perform just one atomic read per put() call).
-    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
-
-    atomic_store_relaxed(&MaxSize, Size);
-    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
-    atomic_store_relaxed(&MaxCacheSize, CacheSize);
-
-    Cache.init();
-  }
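-
-  // Editorial note (sketch, not part of the upstream file): `Size / 10 * 9`
-  // divides first so the intermediate value cannot overflow uptr, at the cost
-  // of flooring to a multiple of 9. E.g. for Size == 1048576:
-  //   1048576 / 10 * 9 == 943713, while 1048576 * 9 / 10 == 943718.
-  static_assert(1048576UL / 10 * 9 == 943713UL, "division-first flooring");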
-
-  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
-  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
-
-  // This is supposed to be used in test only.
-  bool isEmpty() {
-    ScopedLock L(CacheMutex);
-    return Cache.getSize() == 0U;
-  }
-
-  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
-    C->enqueue(Cb, Ptr, Size);
-    if (C->getSize() > getCacheSize())
-      drain(C, Cb);
-  }
-
-  void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
-    bool needRecycle = false;
-    {
-      ScopedLock L(CacheMutex);
-      Cache.transfer(C);
-      needRecycle = Cache.getSize() > getMaxSize();
-    }
-
-    if (needRecycle && RecycleMutex.tryLock())
-      recycle(atomic_load_relaxed(&MinSize), Cb);
-  }
-
-  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
-    {
-      ScopedLock L(CacheMutex);
-      Cache.transfer(C);
-    }
-    RecycleMutex.lock();
-    recycle(0, Cb);
-  }
-
-  void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
-    ScopedLock L(CacheMutex);
-    // It assumes that the world is stopped, as in the allocator's printStats.
-    Cache.getStats(Str);
-    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
-                getMaxSize() >> 10, getCacheSize() >> 10);
-  }
-
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    // RecycleMutex must be locked first since we grab CacheMutex within
-    // recycle().
-    RecycleMutex.lock();
-    CacheMutex.lock();
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    CacheMutex.unlock();
-    RecycleMutex.unlock();
-  }
-
-private:
-  // Read-only data.
-  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
-  CacheT Cache GUARDED_BY(CacheMutex);
-  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
-  atomic_uptr MinSize = {};
-  atomic_uptr MaxSize = {};
-  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
-
-  void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
-      EXCLUDES(CacheMutex) {
-    CacheT Tmp;
-    Tmp.init();
-    {
-      ScopedLock L(CacheMutex);
-      // Go over the batches and merge partially filled ones to save some
-      // memory; otherwise the batches themselves (whose memory is counted
-      // against the quarantine limit) can outweigh the user's actual
-      // quarantined chunks, which defeats the purpose of the quarantine.
-      const uptr CacheSize = Cache.getSize();
-      const uptr OverheadSize = Cache.getOverheadSize();
-      DCHECK_GE(CacheSize, OverheadSize);
-      // Do the merge only when the overhead exceeds this predefined limit
-      // (might require some tuning). It saves us a merge attempt when the
-      // batch list is unlikely to contain batches suitable for merging.
-      constexpr uptr OverheadThresholdPercents = 100;
-      if (CacheSize > OverheadSize &&
-          OverheadSize * (100 + OverheadThresholdPercents) >
-              CacheSize * OverheadThresholdPercents) {
-        Cache.mergeBatches(&Tmp);
-      }
-      // Extract enough chunks from the quarantine to get below the max
-      // quarantine size and leave some leeway for the newly quarantined chunks.
-      while (Cache.getSize() > MinSize)
-        Tmp.enqueueBatch(Cache.dequeueBatch());
-    }
-    RecycleMutex.unlock();
-    doRecycle(&Tmp, Cb);
-  }
-
-  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
-    while (QuarantineBatch *B = C->dequeueBatch()) {
-      const u32 Seed = static_cast<u32>(
-          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
-      B->shuffle(Seed);
-      constexpr uptr NumberOfPrefetch = 8UL;
-      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
-      for (uptr I = 0; I < NumberOfPrefetch; I++)
-        PREFETCH(B->Batch[I]);
-      for (uptr I = 0, Count = B->Count; I < Count; I++) {
-        if (I + NumberOfPrefetch < Count)
-          PREFETCH(B->Batch[I + NumberOfPrefetch]);
-        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
-      }
-      Cb.deallocate(B);
-    }
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_QUARANTINE_H_
diff --git a/Telegram/ThirdParty/scudo/release.cpp b/Telegram/ThirdParty/scudo/release.cpp
deleted file mode 100644
index 875a2b0c1..000000000
--- a/Telegram/ThirdParty/scudo/release.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//===-- release.cpp ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "release.h"
-
-namespace scudo {
-
-BufferPool<RegionPageMap::StaticBufferCount,
-           RegionPageMap::StaticBufferNumElements>
-    RegionPageMap::Buffers;
-
-} // namespace scudo
diff --git a/Telegram/ThirdParty/scudo/release.h b/Telegram/ThirdParty/scudo/release.h
deleted file mode 100644
index b6f76a4d2..000000000
--- a/Telegram/ThirdParty/scudo/release.h
+++ /dev/null
@@ -1,701 +0,0 @@
-//===-- release.h -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_RELEASE_H_
-#define SCUDO_RELEASE_H_
-
-#include "common.h"
-#include "list.h"
-#include "mem_map.h"
-#include "mutex.h"
-#include "thread_annotations.h"
-
-namespace scudo {
-
-template <typename MemMapT> class RegionReleaseRecorder {
-public:
-  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
-      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}
-
-  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
-
-  uptr getReleasedBytes() const { return ReleasedBytes; }
-
-  uptr getBase() const { return Base; }
-
-  // Releases the [From, To) range of pages back to the OS. Note that `From`
-  // and `To` are offsets from `Base` + `Offset`.
-  void releasePageRangeToOS(uptr From, uptr To) {
-    const uptr Size = To - From;
-    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
-    ReleasedRangesCount++;
-    ReleasedBytes += Size;
-  }
-
-private:
-  uptr ReleasedRangesCount = 0;
-  uptr ReleasedBytes = 0;
-  MemMapT *RegionMemMap = nullptr;
-  uptr Base = 0;
-  // The release offset from Base. This is used when we know a given range after
-  // Base will not be released.
-  uptr Offset = 0;
-};
-
-class ReleaseRecorder {
-public:
-  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
-      : Base(Base), Offset(Offset), Data(Data) {}
-
-  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
-
-  uptr getReleasedBytes() const { return ReleasedBytes; }
-
-  uptr getBase() const { return Base; }
-
-  // Releases the [From, To) range of pages back to the OS.
-  void releasePageRangeToOS(uptr From, uptr To) {
-    const uptr Size = To - From;
-    releasePagesToOS(Base, From + Offset, Size, Data);
-    ReleasedRangesCount++;
-    ReleasedBytes += Size;
-  }
-
-private:
-  uptr ReleasedRangesCount = 0;
-  uptr ReleasedBytes = 0;
-  // The starting address to release. Note that we may want to combine (Base +
-  // Offset) into a new Base. However, the Base is retrieved from
-  // `MapPlatformData` on Fuchsia, so the Offset would not be taken into
-  // account there. Therefore, store them separately to make it work on all
-  // the platforms.
-  uptr Base = 0;
-  // The release offset from Base. This is used when we know a given range after
-  // Base will not be released.
-  uptr Offset = 0;
-  MapPlatformData *Data = nullptr;
-};
-
-class FragmentationRecorder {
-public:
-  FragmentationRecorder() = default;
-
-  uptr getReleasedPagesCount() const { return ReleasedPagesCount; }
-
-  void releasePageRangeToOS(uptr From, uptr To) {
-    DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
-    ReleasedPagesCount += (To - From) / getPageSizeCached();
-  }
-
-private:
-  uptr ReleasedPagesCount = 0;
-};
-
-// A buffer pool which holds a fixed number of static buffers of `uptr` elements
-// for fast buffer allocation. If the request size is greater than
-// `StaticBufferNumElements` or if all the static buffers are in use, it'll
-// delegate the allocation to map().
-template <uptr StaticBufferCount, uptr StaticBufferNumElements>
-class BufferPool {
-public:
-  // Preserve 1 bit in the `Mask` so that we don't need a zero-check while
-  // extracting the least significant set bit from the `Mask`.
-  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
-  static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
-                          SCUDO_CACHE_LINE_SIZE),
-                "");
-
-  struct Buffer {
-    // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
-    uptr *Data = nullptr;
-
-    // The index of the underlying static buffer, or StaticBufferCount if this
-    // buffer was dynamically allocated. This value is initially set to a poison
-    // value to aid debugging.
-    uptr BufferIndex = ~static_cast<uptr>(0);
-
-    // Only valid if BufferIndex == StaticBufferCount.
-    MemMapT MemMap = {};
-  };
-
-  // Returns a zero-initialized buffer which can contain at least the given
-  // number of elements, or a buffer whose `Data` is null on failure.
-  Buffer getBuffer(const uptr NumElements) {
-    if (UNLIKELY(NumElements > StaticBufferNumElements))
-      return getDynamicBuffer(NumElements);
-
-    uptr index;
-    {
-      // TODO: In general, we expect this operation to be fast, so the waiting
-      // thread won't be put to sleep. The HybridMutex does implement
-      // busy-waiting, but we may want to review the performance and see
-      // whether we need an explicit spin lock here.
-      ScopedLock L(Mutex);
-      index = getLeastSignificantSetBitIndex(Mask);
-      if (index < StaticBufferCount)
-        Mask ^= static_cast<uptr>(1) << index;
-    }
-
-    if (index >= StaticBufferCount)
-      return getDynamicBuffer(NumElements);
-
-    Buffer Buf;
-    Buf.Data = &RawBuffer[index * StaticBufferNumElements];
-    Buf.BufferIndex = index;
-    memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
-    return Buf;
-  }
-
-  void releaseBuffer(Buffer Buf) {
-    DCHECK_NE(Buf.Data, nullptr);
-    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
-    if (Buf.BufferIndex != StaticBufferCount) {
-      ScopedLock L(Mutex);
-      DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
-      Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
-    } else {
-      Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
-    }
-  }
-
-  bool isStaticBufferTestOnly(const Buffer &Buf) {
-    DCHECK_NE(Buf.Data, nullptr);
-    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
-    return Buf.BufferIndex != StaticBufferCount;
-  }
-
-private:
-  Buffer getDynamicBuffer(const uptr NumElements) {
-    // When using a heap-based buffer, precommit the pages backing the
-    // Vmar by passing |MAP_PRECOMMIT| flag. This allows an optimization
-    // where page fault exceptions are skipped as the allocated memory
-    // is accessed. So far, this is only enabled on Fuchsia. It hasn't proven a
-    // performance benefit on other platforms.
-    const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
-    const uptr MappedSize =
-        roundUp(NumElements * sizeof(uptr), getPageSizeCached());
-    Buffer Buf;
-    if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
-      Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
-      Buf.BufferIndex = StaticBufferCount;
-    }
-    return Buf;
-  }
-
-  HybridMutex Mutex;
-  // A '1' bit means the buffer at that index is free; '0' means it's in use.
-  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
-  uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
-};
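-
-// Editorial sketch (hypothetical helpers, not part of the upstream file): the
-// bit-mask slot scheme used by BufferPool, in isolation. A set bit in `Mask`
-// means the slot is free; claiming a slot clears its bit, releasing sets it
-// again. One bit always stays set (SlotCount < SCUDO_WORDSIZE), so the
-// least-significant-set-bit lookup never sees a zero mask.
-inline uptr claimSlotSketch(uptr &Mask, uptr SlotCount) {
-  const uptr Index = getLeastSignificantSetBitIndex(Mask);
-  if (Index < SlotCount)
-    Mask ^= static_cast<uptr>(1) << Index; // Mark the slot as in use.
-  return Index; // An Index >= SlotCount means "fall back to map()".
-}
-inline void releaseSlotSketch(uptr &Mask, uptr Index) {
-  Mask |= static_cast<uptr>(1) << Index; // Mark the slot as free again.
-}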
-
-// A RegionPageMap is used to record the usage of pages in the regions. It
-// implements a packed array of Counters. Each counter occupies 2^N bits,
-// enough to store the counter's MaxValue. The ctor will try to use a static
-// buffer first, and if that fails (the buffer is too small or already
-// locked), will allocate the required buffer via map(). The caller is
-// expected to check whether the initialization was successful by checking the
-// isAllocated() result. For the sake of performance, none of the accessors
-// check the validity of the arguments: it is assumed that Index is always in
-// the [0, N) range and the value is not incremented past MaxValue.
-class RegionPageMap {
-public:
-  RegionPageMap()
-      : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
-        PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
-        BufferNumElements(0) {}
-  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
-    reset(NumberOfRegions, CountersPerRegion, MaxValue);
-  }
-  ~RegionPageMap() {
-    if (!isAllocated())
-      return;
-    Buffers.releaseBuffer(Buffer);
-    Buffer = {};
-  }
-
-  // The lock of `StaticBuffer` is acquired conditionally and there's no easy
-  // way to specify the thread-safety attribute properly in the current code
-  // structure. Besides, it's the only place where we may want to check thread
-  // safety. Therefore, it's fine to bypass the thread-safety analysis for
-  // now.
-  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
-    DCHECK_GT(NumberOfRegion, 0);
-    DCHECK_GT(CountersPerRegion, 0);
-    DCHECK_GT(MaxValue, 0);
-
-    Regions = NumberOfRegion;
-    NumCounters = CountersPerRegion;
-
-    constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
-    // Rounding the counter storage size up to a power of two allows using bit
-    // shifts to calculate a particular counter's index and offset.
-    const uptr CounterSizeBits =
-        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
-    DCHECK_LE(CounterSizeBits, MaxCounterBits);
-    CounterSizeBitsLog = getLog2(CounterSizeBits);
-    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
-
-    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
-    DCHECK_GT(PackingRatio, 0);
-    PackingRatioLog = getLog2(PackingRatio);
-    BitOffsetMask = PackingRatio - 1;
-
-    SizePerRegion =
-        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
-        PackingRatioLog;
-    BufferNumElements = SizePerRegion * Regions;
-    Buffer = Buffers.getBuffer(BufferNumElements);
-  }
-
-  bool isAllocated() const { return Buffer.Data != nullptr; }
-
-  uptr getCount() const { return NumCounters; }
-
-  uptr get(uptr Region, uptr I) const {
-    DCHECK_LT(Region, Regions);
-    DCHECK_LT(I, NumCounters);
-    const uptr Index = I >> PackingRatioLog;
-    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
-    return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
-           CounterMask;
-  }
-
-  void inc(uptr Region, uptr I) const {
-    DCHECK_LT(get(Region, I), CounterMask);
-    const uptr Index = I >> PackingRatioLog;
-    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
-    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
-    DCHECK_EQ(isAllCounted(Region, I), false);
-    Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
-                                                   << BitOffset;
-  }
-
-  void incN(uptr Region, uptr I, uptr N) const {
-    DCHECK_GT(N, 0U);
-    DCHECK_LE(N, CounterMask);
-    DCHECK_LE(get(Region, I), CounterMask - N);
-    const uptr Index = I >> PackingRatioLog;
-    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
-    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
-    DCHECK_EQ(isAllCounted(Region, I), false);
-    Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
-  }
-
-  void incRange(uptr Region, uptr From, uptr To) const {
-    DCHECK_LE(From, To);
-    const uptr Top = Min(To + 1, NumCounters);
-    for (uptr I = From; I < Top; I++)
-      inc(Region, I);
-  }
-
-  // Set the counter to the max value. Note that the max number of blocks in a
-  // page may vary. To provide an easier way to tell if all the blocks are
-  // counted for different pages, set to the same max value to denote the
-  // all-counted status.
-  void setAsAllCounted(uptr Region, uptr I) const {
-    DCHECK_LE(get(Region, I), CounterMask);
-    const uptr Index = I >> PackingRatioLog;
-    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
-    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
-    Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
-  }
-  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
-    DCHECK_LE(From, To);
-    const uptr Top = Min(To + 1, NumCounters);
-    for (uptr I = From; I < Top; I++)
-      setAsAllCounted(Region, I);
-  }
-
-  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
-    const uptr Count = get(Region, I);
-    if (Count == CounterMask)
-      return true;
-    if (Count == MaxCount) {
-      setAsAllCounted(Region, I);
-      return true;
-    }
-    return false;
-  }
-  bool isAllCounted(uptr Region, uptr I) const {
-    return get(Region, I) == CounterMask;
-  }
-
-  uptr getBufferNumElements() const { return BufferNumElements; }
-
-private:
-  // We may consider making this configurable if there are cases which may
-  // benefit from this.
-  static const uptr StaticBufferCount = 2U;
-  static const uptr StaticBufferNumElements = 512U;
-  using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
-  static BufferPoolT Buffers;
-
-  uptr Regions;
-  uptr NumCounters;
-  uptr CounterSizeBitsLog;
-  uptr CounterMask;
-  uptr PackingRatioLog;
-  uptr BitOffsetMask;
-
-  uptr SizePerRegion;
-  uptr BufferNumElements;
-  BufferPoolT::Buffer Buffer;
-};
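-
-// Editorial worked example (hypothetical, not part of the upstream file): the
-// packed-counter addressing performed by get()/inc(), specialized for
-// MaxValue = 10 on a 64-bit platform. 10 needs 4 bits, so 16 four-bit
-// counters pack into each 64-bit word.
-namespace page_map_sketch {
-constexpr uptr CounterSizeBitsLog = 2; // 4 == 1 << 2 bits per counter.
-constexpr uptr PackingRatioLog = 4;    // 16 == 1 << 4 counters per word.
-constexpr uptr BitOffsetMask = 15;
-constexpr uptr wordIndex(uptr I) { return I >> PackingRatioLog; }
-constexpr uptr bitOffset(uptr I) {
-  return (I & BitOffsetMask) << CounterSizeBitsLog;
-}
-// Counter 37 lives in word 2, at bits [20, 24).
-static_assert(wordIndex(37) == 2 && bitOffset(37) == 20, "");
-} // namespace page_map_sketch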
-
-template <class ReleaseRecorderT> class FreePagesRangeTracker {
-public:
-  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
-      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
-
-  void processNextPage(bool Released) {
-    if (Released) {
-      if (!InRange) {
-        CurrentRangeStatePage = CurrentPage;
-        InRange = true;
-      }
-    } else {
-      closeOpenedRange();
-    }
-    CurrentPage++;
-  }
-
-  void skipPages(uptr N) {
-    closeOpenedRange();
-    CurrentPage += N;
-  }
-
-  void finish() { closeOpenedRange(); }
-
-private:
-  void closeOpenedRange() {
-    if (InRange) {
-      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
-                                    (CurrentPage << PageSizeLog));
-      InRange = false;
-    }
-  }
-
-  ReleaseRecorderT &Recorder;
-  const uptr PageSizeLog;
-  bool InRange = false;
-  uptr CurrentPage = 0;
-  uptr CurrentRangeStatePage = 0;
-};
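-
-// Editorial sketch (hypothetical, not part of the upstream file): how the
-// tracker coalesces per-page decisions into [From, To) byte ranges. A
-// recorder only needs to expose releasePageRangeToOS(). With 4 KiB pages and
-// the free/used pattern below, the recorder fires exactly twice:
-// [0x0, 0x2000) and [0x3000, 0x6000).
-struct NoOpRecorderSketch {
-  void releasePageRangeToOS(uptr From, uptr To) {
-    (void)From;
-    (void)To;
-  }
-};
-inline void rangeTrackerSketch() {
-  NoOpRecorderSketch Recorder;
-  FreePagesRangeTracker<NoOpRecorderSketch> Tracker(Recorder);
-  const bool Pattern[] = {true, true, false, true, true, true, false};
-  for (const bool Released : Pattern)
-    Tracker.processNextPage(Released);
-  Tracker.finish();
-}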
-
-struct PageReleaseContext {
-  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
-                     uptr ReleaseOffset = 0)
-      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
-    PageSize = getPageSizeCached();
-    if (BlockSize <= PageSize) {
-      if (PageSize % BlockSize == 0) {
-        // Same number of chunks per page, no cross overs.
-        FullPagesBlockCountMax = PageSize / BlockSize;
-        SameBlockCountPerPage = true;
-      } else if (BlockSize % (PageSize % BlockSize) == 0) {
-        // Some chunks are crossing page boundaries, which means that the page
-        // contains one or two partial chunks, but all pages contain the same
-        // number of chunks.
-        FullPagesBlockCountMax = PageSize / BlockSize + 1;
-        SameBlockCountPerPage = true;
-      } else {
-        // Some chunks are crossing page boundaries, which means that the page
-        // contains one or two partial chunks.
-        FullPagesBlockCountMax = PageSize / BlockSize + 2;
-        SameBlockCountPerPage = false;
-      }
-    } else {
-      if (BlockSize % PageSize == 0) {
-        // One chunk covers multiple pages, no cross overs.
-        FullPagesBlockCountMax = 1;
-        SameBlockCountPerPage = true;
-      } else {
-        // One chunk covers multiple pages, and some chunks cross page
-        // boundaries. Some pages contain one chunk, some contain two.
-        FullPagesBlockCountMax = 2;
-        SameBlockCountPerPage = false;
-      }
-    }
-
-    // TODO: For multiple regions, it's more complicated to support partial
-    // region marking (which includes the complexity of how to handle the last
-    // block in a region). We may consider this after markFreeBlocks() accepts
-    // only free blocks from the same region.
-    if (NumberOfRegions != 1)
-      DCHECK_EQ(ReleaseOffset, 0U);
-
-    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
-    PageSizeLog = getLog2(PageSize);
-    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
-  }
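-
-  // Editorial worked example (not part of the upstream file): the values the
-  // branches above produce for a 4096-byte page:
-  //   BlockSize = 1024: 4096 % 1024 == 0            -> max 4, same per page
-  //   BlockSize = 48:   4096 % 48 == 16, 48 % 16 == 0
-  //                                                  -> max 86, same per page
-  //   BlockSize = 1536: 4096 % 1536 == 1024, 1536 % 1024 == 512
-  //                                                  -> max 4, varies per page
-  //   BlockSize = 8192: 8192 % 4096 == 0             -> max 1, same per page
-  //   BlockSize = 6144: 6144 % 4096 == 2048          -> max 2, varies per page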
-
-  // PageMap is lazily allocated when markFreeBlocks() is invoked.
-  bool hasBlockMarked() const {
-    return PageMap.isAllocated();
-  }
-
-  bool ensurePageMapAllocated() {
-    if (PageMap.isAllocated())
-      return true;
-    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
-    // TODO: Log some message when we fail on PageMap allocation.
-    return PageMap.isAllocated();
-  }
-
-  // Mark all the blocks in the given range [From, To). Instead of visiting
-  // all the blocks, we just mark the pages as all counted. Note that `From`
-  // and `To` have to be page aligned, with one exception: if `To` is equal to
-  // the RegionSize, it doesn't need to be page aligned.
-  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
-                             const uptr RegionIndex, const uptr RegionSize) {
-    DCHECK_LT(From, To);
-    DCHECK_LE(To, Base + RegionSize);
-    DCHECK_EQ(From % PageSize, 0U);
-    DCHECK_LE(To - From, RegionSize);
-
-    if (!ensurePageMapAllocated())
-      return false;
-
-    uptr FromInRegion = From - Base;
-    uptr ToInRegion = To - Base;
-    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);
-
-    // The straddling block sits across entire range.
-    if (FirstBlockInRange >= ToInRegion)
-      return true;
-
-    // The first block may not sit at the first page in the range; move
-    // `FromInRegion` to the first block's page.
-    FromInRegion = roundDown(FirstBlockInRange, PageSize);
-
-    // When the first block is not aligned to the range boundary, there is a
-    // block sitting across `From`, which looks like:
-    //
-    //   From                                             To
-    //     V                                               V
-    //     +-----------------------------------------------+
-    //  +-----+-----+-----+-----+
-    //  |     |     |     |     | ...
-    //  +-----+-----+-----+-----+
-    //     |-    first page     -||-    second page    -||- ...
-    //
-    // Therefore, we can't just mark the first page as all counted. Instead, we
-    // increment the number of blocks in the first page in the page map and
-    // then round up the `From` to the next page.
-    if (FirstBlockInRange != FromInRegion) {
-      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
-      uptr NumBlocksInFirstPage =
-          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
-          BlockSize;
-      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
-                   NumBlocksInFirstPage);
-      FromInRegion = roundUp(FromInRegion + 1, PageSize);
-    }
-
-    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);
-
-    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
-    // point because the range may contain only one block.
-
-    // When the last block sits across `To`, we can't just mark the pages
-    // occupied by the last block as all counted. Instead, we increment the
-    // counters of those pages by 1. The exception is that if it's the last
-    // block in the region, it's fine to mark those pages as all counted.
-    if (LastBlockInRange + BlockSize != RegionSize) {
-      DCHECK_EQ(ToInRegion % PageSize, 0U);
-      // The case below is like,
-      //
-      //   From                                      To
-      //     V                                        V
-      //     +----------------------------------------+
-      //                          +-----+-----+-----+-----+
-      //                          |     |     |     |     | ...
-      //                          +-----+-----+-----+-----+
-      //                    ... -||-    last page    -||-    next page    -|
-      //
-      // The last block is not aligned to `To`, we need to increment the
-      // counter of `next page` by 1.
-      if (LastBlockInRange + BlockSize != ToInRegion) {
-        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
-                         getPageIndex(LastBlockInRange + BlockSize - 1));
-      }
-    } else {
-      ToInRegion = RegionSize;
-    }
-
-    // After handling the first page and the last block, it's safe to mark any
-    // page in between the range [From, To).
-    if (FromInRegion < ToInRegion) {
-      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
-                                   getPageIndex(ToInRegion - 1));
-    }
-
-    return true;
-  }
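-
-  // Editorial sketch (hypothetical helper, not part of the upstream file):
-  // the first-page adjustment above is a ceiling division. The number of
-  // blocks overlapping the first page is
-  //   ceil((FromInRegion + PageSize - FirstBlockInRange) / BlockSize)
-  // written with the usual (A + B - 1) / B integer idiom.
-  static uptr ceilDivSketch(uptr A, uptr B) { return (A + B - 1) / B; }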
-
-  template <class TransferBatchT, typename DecompactPtrT>
-  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
-                              DecompactPtrT DecompactPtr, const uptr Base,
-                              const uptr RegionIndex, const uptr RegionSize,
-                              bool MayContainLastBlockInRegion) {
-    if (!ensurePageMapAllocated())
-      return false;
-
-    if (MayContainLastBlockInRegion) {
-      const uptr LastBlockInRegion =
-          ((RegionSize / BlockSize) - 1U) * BlockSize;
-      // The last block in a region may not use the entire page, so we mark
-      // the following "pretend" memory block(s) as free in advance.
-      //
-      //     Region Boundary
-      //         v
-      //  -----+-----------------------+
-      //       |      Last Page        | <- Rounded Region Boundary
-      //  -----+-----------------------+
-      //   |-----||- trailing blocks  -|
-      //      ^
-      //   last block
-      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
-      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
-      // If the difference between `RoundedRegionSize` and
-      // `TrailingBlockBase` is a page or more, that implies the reported
-      // `RegionSize` may not be accurate.
-      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);
-
-      // Only the last page touched by the last block needs its trailing
-      // blocks marked. Note that if the last "pretend" block straddles the
-      // boundary, we still have to count it so that the per-page block
-      // counting logic stays consistent.
-      uptr NumTrailingBlocks =
-          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
-           BlockSize - 1) /
-          BlockSize;
-      if (NumTrailingBlocks > 0) {
-        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
-                     NumTrailingBlocks);
-      }
-    }
-
-    // Iterate over free chunks and count how many free chunks affect each
-    // allocated page.
-    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
-      // Each chunk affects one page only.
-      for (const auto &It : FreeList) {
-        for (u16 I = 0; I < It.getCount(); I++) {
-          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
-          DCHECK_LT(PInRegion, RegionSize);
-          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
-        }
-      }
-    } else {
-      // In all other cases chunks might affect more than one page.
-      DCHECK_GE(RegionSize, BlockSize);
-      for (const auto &It : FreeList) {
-        for (u16 I = 0; I < It.getCount(); I++) {
-          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
-          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
-                           getPageIndex(PInRegion + BlockSize - 1));
-        }
-      }
-    }
-
-    return true;
-  }
-
-  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
-  uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }
-
-  uptr BlockSize;
-  uptr NumberOfRegions;
-  // For partial region marking, some pages at the front do not need to be
-  // counted.
-  uptr ReleasePageOffset;
-  uptr PageSize;
-  uptr PagesCount;
-  uptr PageSizeLog;
-  uptr FullPagesBlockCountMax;
-  bool SameBlockCountPerPage;
-  RegionPageMap PageMap;
-};
-
-// Try to release the pages which don't have any in-use blocks, i.e., all the
-// blocks in them are free. The `PageMap` records the number of free blocks in
-// each page.
-template <class ReleaseRecorderT, typename SkipRegionT>
-NOINLINE void
-releaseFreeMemoryToOS(PageReleaseContext &Context,
-                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
-  const uptr PageSize = Context.PageSize;
-  const uptr BlockSize = Context.BlockSize;
-  const uptr PagesCount = Context.PagesCount;
-  const uptr NumberOfRegions = Context.NumberOfRegions;
-  const uptr ReleasePageOffset = Context.ReleasePageOffset;
-  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
-  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
-  RegionPageMap &PageMap = Context.PageMap;
-
-  // Iterate over the pages, detecting ranges of pages whose chunk counters
-  // equal the expected number of chunks for each particular page.
-  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
-  if (SameBlockCountPerPage) {
-    // Fast path, every page has the same number of chunks affecting it.
-    for (uptr I = 0; I < NumberOfRegions; I++) {
-      if (SkipRegion(I)) {
-        RangeTracker.skipPages(PagesCount);
-        continue;
-      }
-      for (uptr J = 0; J < PagesCount; J++) {
-        const bool CanRelease =
-            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
-        RangeTracker.processNextPage(CanRelease);
-      }
-    }
-  } else {
-    // Slow path: go through the pages, keeping count of how many chunks
-    // affect each page.
-    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
-    const uptr Pnc = Pn * BlockSize;
-    // The idea is to increment the current page pointer by the first chunk
-    // size, middle portion size (the portion of the page covered by chunks
-    // except the first and the last one) and then the last chunk size, adding
-    // up the number of chunks on the current page and checking on every step
-    // whether the page boundary was crossed.
-    for (uptr I = 0; I < NumberOfRegions; I++) {
-      if (SkipRegion(I)) {
-        RangeTracker.skipPages(PagesCount);
-        continue;
-      }
-      uptr PrevPageBoundary = 0;
-      uptr CurrentBoundary = 0;
-      if (ReleasePageOffset > 0) {
-        PrevPageBoundary = ReleasePageOffset * PageSize;
-        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
-      }
-      for (uptr J = 0; J < PagesCount; J++) {
-        const uptr PageBoundary = PrevPageBoundary + PageSize;
-        uptr BlocksPerPage = Pn;
-        if (CurrentBoundary < PageBoundary) {
-          if (CurrentBoundary > PrevPageBoundary)
-            BlocksPerPage++;
-          CurrentBoundary += Pnc;
-          if (CurrentBoundary < PageBoundary) {
-            BlocksPerPage++;
-            CurrentBoundary += BlockSize;
-          }
-        }
-        PrevPageBoundary = PageBoundary;
-        const bool CanRelease =
-            PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
-        RangeTracker.processNextPage(CanRelease);
-      }
-    }
-  }
-  RangeTracker.finish();
-}
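-
-// Editorial worked trace (not part of the upstream file) of the slow path
-// above, with PageSize = 4096 and BlockSize = 1536, i.e. Pn = 2, Pnc = 3072,
-// and blocks starting at 0, 1536, 3072, 4608, 6144, 7680, 9216, ...:
-//   page 0: boundary 4096,  CurrentBoundary 0 -> 3072 -> 4608,    3 blocks
-//   page 1: boundary 8192,  CurrentBoundary 4608 -> 7680 -> 9216, 4 blocks
-//   page 2: boundary 12288, CurrentBoundary 9216 -> 12288,        3 blocks
-// matching the 3, 4, and 3 blocks that actually touch those pages.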
-
-} // namespace scudo
-
-#endif // SCUDO_RELEASE_H_
diff --git a/Telegram/ThirdParty/scudo/report.cpp b/Telegram/ThirdParty/scudo/report.cpp
deleted file mode 100644
index 9cef0adc0..000000000
--- a/Telegram/ThirdParty/scudo/report.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-//===-- report.cpp ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "report.h"
-
-#include "atomic_helpers.h"
-#include "string_utils.h"
-
-#include <stdarg.h>
-
-namespace scudo {
-
-class ScopedErrorReport {
-public:
-  ScopedErrorReport() : Message() { Message.append("Scudo ERROR: "); }
-  void append(const char *Format, ...) {
-    va_list Args;
-    va_start(Args, Format);
-    Message.vappend(Format, Args);
-    va_end(Args);
-  }
-  NORETURN ~ScopedErrorReport() { reportRawError(Message.data()); }
-
-private:
-  ScopedString Message;
-};
-
-inline void NORETURN trap() { __builtin_trap(); }
-
-// This could potentially be called recursively if a CHECK fails in the reports.
-void NORETURN reportCheckFailed(const char *File, int Line,
-                                const char *Condition, u64 Value1, u64 Value2) {
-  static atomic_u32 NumberOfCalls;
-  if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
-    // TODO(kostyak): maybe sleep here?
-    trap();
-  }
-  ScopedErrorReport Report;
-  Report.append("CHECK failed @ %s:%d %s ((u64)op1=%llu, (u64)op2=%llu)\n",
-                File, Line, Condition, Value1, Value2);
-}
-
-// Generic string fatal error message.
-void NORETURN reportError(const char *Message) {
-  ScopedErrorReport Report;
-  Report.append("%s\n", Message);
-}
-
-// Generic fatal error message without ScopedString.
-void NORETURN reportRawError(const char *Message) {
-  outputRaw(Message);
-  setAbortMessage(Message);
-  die();
-}
-
-void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
-  ScopedErrorReport Report;
-  Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
-}
-
-// The checksum of a chunk header is invalid. This could be caused by an
-// {over,under}write of the header, or a pointer that is not an actual chunk.
-void NORETURN reportHeaderCorruption(void *Ptr) {
-  ScopedErrorReport Report;
-  Report.append("corrupted chunk header at address %p\n", Ptr);
-}
-
-// The allocator was compiled with parameters that conflict with field size
-// requirements.
-void NORETURN reportSanityCheckError(const char *Field) {
-  ScopedErrorReport Report;
-  Report.append("maximum possible %s doesn't fit in header\n", Field);
-}
-
-// We enforce a maximum alignment, to keep fields smaller and generally prevent
-// integer overflows, or unexpected corner cases.
-void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
-  ScopedErrorReport Report;
-  Report.append("invalid allocation alignment: %zu exceeds maximum supported "
-                "alignment of %zu\n",
-                Alignment, MaxAlignment);
-}
-
-// See above, we also enforce a maximum size.
-void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
-                                         uptr MaxSize) {
-  ScopedErrorReport Report;
-  Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
-                "maximum supported size of %zu\n",
-                UserSize, TotalSize, MaxSize);
-}
-
-void NORETURN reportOutOfBatchClass() {
-  ScopedErrorReport Report;
-  Report.append("BatchClass region is used up, can't hold any free block\n");
-}
-
-void NORETURN reportOutOfMemory(uptr RequestedSize) {
-  ScopedErrorReport Report;
-  Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
-}
-
-static const char *stringifyAction(AllocatorAction Action) {
-  switch (Action) {
-  case AllocatorAction::Recycling:
-    return "recycling";
-  case AllocatorAction::Deallocating:
-    return "deallocating";
-  case AllocatorAction::Reallocating:
-    return "reallocating";
-  case AllocatorAction::Sizing:
-    return "sizing";
-  }
-  return "<invalid action>";
-}
-
-// The chunk is not in a state congruent with the operation we want to
-// perform. This is usually the case with a double-free or a realloc of a
-// freed pointer.
-void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
-  ScopedErrorReport Report;
-  Report.append("invalid chunk state when %s address %p\n",
-                stringifyAction(Action), Ptr);
-}
-
-void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
-  ScopedErrorReport Report;
-  Report.append("misaligned pointer when %s address %p\n",
-                stringifyAction(Action), Ptr);
-}
-
-// The deallocation function used is at odds with the one used to allocate the
-// chunk (e.g., new[]/delete or malloc/delete, and so on).
-void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
-                                        u8 TypeA, u8 TypeB) {
-  ScopedErrorReport Report;
-  Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
-                stringifyAction(Action), Ptr, TypeA, TypeB);
-}
-
-// The size specified to the delete operator does not match the one that was
-// passed to new when allocating the chunk.
-void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
-                                       uptr ExpectedSize) {
-  ScopedErrorReport Report;
-  Report.append(
-      "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
-      Size, ExpectedSize);
-}
-
-void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
-  ScopedErrorReport Report;
-  Report.append(
-      "invalid allocation alignment: %zu, alignment must be a power of two\n",
-      Alignment);
-}
-
-void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
-  ScopedErrorReport Report;
-  Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
-                "be represented with type size_t\n",
-                Count, Size);
-}
-
-void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
-  ScopedErrorReport Report;
-  Report.append(
-      "invalid alignment requested in posix_memalign: %zu, alignment must be a "
-      "power of two and a multiple of sizeof(void *) == %zu\n",
-      Alignment, sizeof(void *));
-}
-
-void NORETURN reportPvallocOverflow(uptr Size) {
-  ScopedErrorReport Report;
-  Report.append("pvalloc parameters overflow: size %zu rounded up to system "
-                "page size %zu cannot be represented in type size_t\n",
-                Size, getPageSizeCached());
-}
-
-void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
-  ScopedErrorReport Report;
-  Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
-                "must be a power of two and the requested size %zu must be a "
-                "multiple of alignment\n",
-                Alignment, Size);
-}
-
-} // namespace scudo
diff --git a/Telegram/ThirdParty/scudo/report.h b/Telegram/ThirdParty/scudo/report.h
deleted file mode 100644
index a510fdaeb..000000000
--- a/Telegram/ThirdParty/scudo/report.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- report.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_REPORT_H_
-#define SCUDO_REPORT_H_
-
-#include "internal_defs.h"
-
-namespace scudo {
-
-// Reports are *fatal* unless stated otherwise.
-
-// Generic error, adds newline to end of message.
-void NORETURN reportError(const char *Message);
-
-// Generic error, but the message is not modified.
-void NORETURN reportRawError(const char *Message);
-
-// Flags related errors.
-void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
-
-// Chunk header related errors.
-void NORETURN reportHeaderCorruption(void *Ptr);
-
-// Sanity checks related error.
-void NORETURN reportSanityCheckError(const char *Field);
-
-// Combined allocator errors.
-void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
-void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
-                                         uptr MaxSize);
-void NORETURN reportOutOfBatchClass();
-void NORETURN reportOutOfMemory(uptr RequestedSize);
-enum class AllocatorAction : u8 {
-  Recycling,
-  Deallocating,
-  Reallocating,
-  Sizing,
-};
-void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
-void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
-void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
-                                        u8 TypeA, u8 TypeB);
-void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);
-
-// C wrappers errors.
-void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
-void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
-void NORETURN reportCallocOverflow(uptr Count, uptr Size);
-void NORETURN reportPvallocOverflow(uptr Size);
-void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size);
-
-} // namespace scudo
-
-#endif // SCUDO_REPORT_H_
diff --git a/Telegram/ThirdParty/scudo/report_linux.cpp b/Telegram/ThirdParty/scudo/report_linux.cpp
deleted file mode 100644
index 6a983036e..000000000
--- a/Telegram/ThirdParty/scudo/report_linux.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-//===-- report_linux.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_LINUX || SCUDO_TRUSTY
-
-#include "common.h"
-#include "internal_defs.h"
-#include "report.h"
-#include "report_linux.h"
-#include "string_utils.h"
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace scudo {
-
-// Fatal internal map() error (potentially OOM related).
-void NORETURN reportMapError(uptr SizeIfOOM) {
-  char Error[128] = "Scudo ERROR: internal map failure\n";
-  if (SizeIfOOM) {
-    formatString(
-        Error, sizeof(Error),
-        "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
-        SizeIfOOM >> 10);
-  }
-  reportRawError(Error);
-}
-
-void NORETURN reportUnmapError(uptr Addr, uptr Size) {
-  char Error[128];
-  formatString(Error, sizeof(Error),
-               "Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
-               "Size %zu\n",
-               strerror(errno), Addr, Size);
-  reportRawError(Error);
-}
-
-void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
-  char Error[128];
-  formatString(
-      Error, sizeof(Error),
-      "Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
-      "Size %zu Prot %x\n",
-      strerror(errno), Addr, Size, Prot);
-  reportRawError(Error);
-}
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX || SCUDO_TRUSTY
diff --git a/Telegram/ThirdParty/scudo/report_linux.h b/Telegram/ThirdParty/scudo/report_linux.h
deleted file mode 100644
index aa0bb247e..000000000
--- a/Telegram/ThirdParty/scudo/report_linux.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===-- report_linux.h ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_REPORT_LINUX_H_
-#define SCUDO_REPORT_LINUX_H_
-
-#include "platform.h"
-
-#if SCUDO_LINUX || SCUDO_TRUSTY
-
-#include "internal_defs.h"
-
-namespace scudo {
-
-// Report a fatal error when a map call fails. SizeIfOOM shall
-// hold the requested size on an out-of-memory error, 0 otherwise.
-void NORETURN reportMapError(uptr SizeIfOOM = 0);
-
-// Report a fatal error when an unmap call fails.
-void NORETURN reportUnmapError(uptr Addr, uptr Size);
-
-// Report a fatal error when a mprotect call fails.
-void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot);
-
-} // namespace scudo
-
-#endif // SCUDO_LINUX || SCUDO_TRUSTY
-
-#endif // SCUDO_REPORT_LINUX_H_
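A hypothetical call site, not from this tree, illustrating the `SizeIfOOM` convention documented above (`platformMap` and the simplified reporter are stand-ins):

```cpp
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-in for a platform primitive; here it always fails with ENOMEM so the
// example is self-contained.
static void *platformMap(size_t) { errno = ENOMEM; return nullptr; }

// Simplified reporter following the same convention as the declaration above:
// a non-zero SizeIfOOM means the failure was out-of-memory.
[[noreturn]] static void reportMapError(uintptr_t SizeIfOOM) {
  if (SizeIfOOM)
    fprintf(stderr, "internal map failure (NO MEMORY) requesting %zuKB\n",
            (size_t)(SizeIfOOM >> 10));
  else
    fprintf(stderr, "internal map failure\n");
  abort();
}

static void *mapOrDie(size_t Size) {
  if (void *P = platformMap(Size))
    return P;
  // Forward the requested size only on out-of-memory; pass 0 (the default in
  // the real header) for any other failure.
  reportMapError(errno == ENOMEM ? Size : 0);
}

int main() { mapOrDie(1 << 20); }
```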
diff --git a/Telegram/ThirdParty/scudo/secondary.h b/Telegram/ThirdParty/scudo/secondary.h
deleted file mode 100644
index f52a4188b..000000000
--- a/Telegram/ThirdParty/scudo/secondary.h
+++ /dev/null
@@ -1,708 +0,0 @@
-//===-- secondary.h ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_SECONDARY_H_
-#define SCUDO_SECONDARY_H_
-
-#include "chunk.h"
-#include "common.h"
-#include "list.h"
-#include "mem_map.h"
-#include "memtag.h"
-#include "mutex.h"
-#include "options.h"
-#include "stats.h"
-#include "string_utils.h"
-#include "thread_annotations.h"
-
-namespace scudo {
-
-// This allocator wraps the platform allocation primitives, and as such is on
-// the slower side; it should preferably be used for larger allocations.
-// Allocated blocks are preceded and followed by a guard page, and hold their
-// own header that is not checksummed: the guard pages and the Combined header
-// should be enough for our purpose.
-
-namespace LargeBlock {
-
-struct alignas(Max<uptr>(archSupportsMemoryTagging()
-                             ? archMemoryTagGranuleSize()
-                             : 1,
-                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
-  LargeBlock::Header *Prev;
-  LargeBlock::Header *Next;
-  uptr CommitBase;
-  uptr CommitSize;
-  MemMapT MemMap;
-};
-
-static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
-static_assert(!archSupportsMemoryTagging() ||
-                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
-              "");
-
-constexpr uptr getHeaderSize() { return sizeof(Header); }
-
-template <typename Config> static uptr addHeaderTag(uptr Ptr) {
-  if (allocatorSupportsMemoryTagging<Config>())
-    return addFixedTag(Ptr, 1);
-  return Ptr;
-}
-
-template <typename Config> static Header *getHeader(uptr Ptr) {
-  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
-}
-
-template <typename Config> static Header *getHeader(const void *Ptr) {
-  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
-}
-
-} // namespace LargeBlock
-
-static inline void unmap(LargeBlock::Header *H) {
-  // Note that `H->MemMap` is stored on the pages it manages. Take over the
-  // ownership of the mapping before unmap() so that nothing running alongside
-  // unmap() touches the now-inaccessible pages.
-  MemMapT MemMap = H->MemMap;
-  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-}
-
-namespace {
-struct CachedBlock {
-  uptr CommitBase = 0;
-  uptr CommitSize = 0;
-  uptr BlockBegin = 0;
-  MemMapT MemMap = {};
-  u64 Time = 0;
-
-  bool isValid() { return CommitBase != 0; }
-
-  void invalidate() { CommitBase = 0; }
-};
-} // namespace
-
-template <typename Config> class MapAllocatorNoCache {
-public:
-  void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
-                UNUSED bool *Zeroed) {
-    return false;
-  }
-  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
-  bool canCache(UNUSED uptr Size) { return false; }
-  void disable() {}
-  void enable() {}
-  void releaseToOS() {}
-  void disableMemoryTagging() {}
-  void unmapTestOnly() {}
-  bool setOption(Option O, UNUSED sptr Value) {
-    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
-        O == Option::MaxCacheEntrySize)
-      return false;
-    // Not supported by the Secondary Cache, but not an error either.
-    return true;
-  }
-
-  void getStats(UNUSED ScopedString *Str) {
-    Str->append("Secondary Cache Disabled\n");
-  }
-};
-
-static const uptr MaxUnusedCachePages = 4U;
-
-template <typename Config>
-bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
-                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
-  Flags |= MAP_RESIZABLE;
-  Flags |= MAP_ALLOWNOMEM;
-
-  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
-  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
-    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
-    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
-                        MAP_MEMTAG | Flags) &&
-           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
-                        "scudo:secondary", Flags);
-  } else {
-    const uptr RemapFlags =
-        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
-    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
-  }
-}
-
-// Template specialization to avoid producing a zero-length array.
-template <typename T, size_t Size> class NonZeroLengthArray {
-public:
-  T &operator[](uptr Idx) { return values[Idx]; }
-
-private:
-  T values[Size];
-};
-template <typename T> class NonZeroLengthArray<T, 0> {
-public:
-  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
-};
-
-template <typename Config> class MapAllocatorCache {
-public:
-  using CacheConfig = typename Config::Secondary::Cache;
-
-  void getStats(ScopedString *Str) {
-    ScopedLock L(Mutex);
-    uptr Integral;
-    uptr Fractional;
-    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
-                      &Fractional);
-    Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
-                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
-                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
-                atomic_load_relaxed(&MaxEntrySize));
-    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
-                "(%zu.%02zu%%)\n",
-                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
-  }
-
-  // Ensure the default maximum specified fits the array.
-  static_assert(CacheConfig::DefaultMaxEntriesCount <=
-                    CacheConfig::EntriesArraySize,
-                "");
-
-  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(EntriesCount, 0U);
-    setOption(Option::MaxCacheEntriesCount,
-              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
-    setOption(Option::MaxCacheEntrySize,
-              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
-    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
-  }
-
-  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
-    if (!canCache(H->CommitSize))
-      return unmap(H);
-
-    bool EntryCached = false;
-    bool EmptyCache = false;
-    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    CachedBlock Entry;
-    Entry.CommitBase = H->CommitBase;
-    Entry.CommitSize = H->CommitSize;
-    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
-    Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
-    if (useMemoryTagging<Config>(Options)) {
-      if (Interval == 0 && !SCUDO_FUCHSIA) {
-        // Release the memory and make it inaccessible at the same time by
-        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
-        // Fuchsia does not support replacing mappings by creating a new mapping
-        // on top so we just do the two syscalls there.
-        Entry.Time = 0;
-        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
-                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
-      } else {
-        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
-                                         MAP_NOACCESS);
-      }
-    } else if (Interval == 0) {
-      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-      Entry.Time = 0;
-    }
-    do {
-      ScopedLock L(Mutex);
-      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
-        // If we get here then memory tagging was disabled in between when we
-        // read Options and when we locked Mutex. We can't insert our entry into
-        // the quarantine or the cache because the permissions would be wrong so
-        // just unmap it.
-        break;
-      }
-      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
-        QuarantinePos =
-            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
-        if (!Quarantine[QuarantinePos].isValid()) {
-          Quarantine[QuarantinePos] = Entry;
-          return;
-        }
-        CachedBlock PrevEntry = Quarantine[QuarantinePos];
-        Quarantine[QuarantinePos] = Entry;
-        if (OldestTime == 0)
-          OldestTime = Entry.Time;
-        Entry = PrevEntry;
-      }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
-      }
-    } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
-      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
-  }
-
-  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
-    const uptr PageSize = getPageSizeCached();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    // After testing several options, 10% of the requested size proved to be
-    // the optimal choice for retrieving cached blocks.
-    constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
-    CachedBlock Entry;
-    uptr EntryHeaderPos = 0;
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return false;
-      u32 OptimalFitIndex = 0;
-      uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize)
-          continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
-          continue;
-        }
-        Found = true;
-        const uptr Diff = HeaderPos - CommitBase;
-        // Immediately use a cached block if its size is close enough to the
-        // requested size.
-        const uptr MaxAllowedFragmentedBytes =
-            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= MaxAllowedFragmentedBytes) {
-          OptimalFitIndex = I;
-          EntryHeaderPos = HeaderPos;
-          break;
-        }
-        // Keep track of the smallest cached block that is greater than
-        // (AllocSize + HeaderSize).
-        if (Diff > MinDiff)
-          continue;
-        OptimalFitIndex = I;
-        MinDiff = Diff;
-        EntryHeaderPos = HeaderPos;
-      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
-        SuccessfulRetrieves++;
-      }
-    }
-    if (!Found)
-      return false;
-
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
-  }
-
-  bool canCache(uptr Size) {
-    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
-           Size <= atomic_load_relaxed(&MaxEntrySize);
-  }
-
-  bool setOption(Option O, sptr Value) {
-    if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(
-          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
-          CacheConfig::MinReleaseToOsIntervalMs);
-      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
-      return true;
-    }
-    if (O == Option::MaxCacheEntriesCount) {
-      const u32 MaxCount = static_cast<u32>(Value);
-      if (MaxCount > CacheConfig::EntriesArraySize)
-        return false;
-      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
-      return true;
-    }
-    if (O == Option::MaxCacheEntrySize) {
-      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
-      return true;
-    }
-    // Not supported by the Secondary Cache, but not an error either.
-    return true;
-  }
-
-  void releaseToOS() { releaseOlderThan(UINT64_MAX); }
-
-  void disableMemoryTagging() EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
-      if (Quarantine[I].isValid()) {
-        MemMapT &MemMap = Quarantine[I].MemMap;
-        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-        Quarantine[I].invalidate();
-      }
-    }
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
-    }
-    QuarantinePos = -1U;
-  }
-
-  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
-
-  void unmapTestOnly() { empty(); }
-
-private:
-  void empty() {
-    MemMapT MapInfo[CacheConfig::EntriesArraySize];
-    uptr N = 0;
-    {
-      ScopedLock L(Mutex);
-      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        Entries[I].invalidate();
-        N++;
-      }
-      EntriesCount = 0;
-      IsFullEvents = 0;
-    }
-    for (uptr I = 0; I < N; I++) {
-      MemMapT &MemMap = MapInfo[I];
-      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-    }
-  }
-
-  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
-    if (!Entry.isValid() || !Entry.Time)
-      return;
-    if (Entry.Time > Time) {
-      if (OldestTime == 0 || Entry.Time < OldestTime)
-        OldestTime = Entry.Time;
-      return;
-    }
-    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-    Entry.Time = 0;
-  }
-
-  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
-      return;
-    OldestTime = 0;
-    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
-      releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
-      releaseIfOlderThan(Entries[I], Time);
-  }
-
-  HybridMutex Mutex;
-  u32 EntriesCount GUARDED_BY(Mutex) = 0;
-  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
-  atomic_u32 MaxEntriesCount = {};
-  atomic_uptr MaxEntrySize = {};
-  u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
-  atomic_s32 ReleaseToOsIntervalMs = {};
-  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
-  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
-
-  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
-  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
-      Quarantine GUARDED_BY(Mutex) = {};
-};
-
-template <typename Config> class MapAllocator {
-public:
-  void init(GlobalStats *S,
-            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(AllocatedBytes, 0U);
-    DCHECK_EQ(FreedBytes, 0U);
-    Cache.init(ReleaseToOsInterval);
-    Stats.init();
-    if (LIKELY(S))
-      S->link(&Stats);
-  }
-
-  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
-                 uptr *BlockEnd = nullptr,
-                 FillContentsMode FillContents = NoFill);
-
-  void deallocate(const Options &Options, void *Ptr);
-
-  static uptr getBlockEnd(void *Ptr) {
-    auto *B = LargeBlock::getHeader<Config>(Ptr);
-    return B->CommitBase + B->CommitSize;
-  }
-
-  static uptr getBlockSize(void *Ptr) {
-    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
-  }
-
-  static constexpr uptr getHeadersSize() {
-    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
-  }
-
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    Mutex.lock();
-    Cache.disable();
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    Cache.enable();
-    Mutex.unlock();
-  }
-
-  template <typename F> void iterateOverBlocks(F Callback) const {
-    Mutex.assertHeld();
-
-    for (const auto &H : InUseBlocks) {
-      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
-      if (allocatorSupportsMemoryTagging<Config>())
-        Ptr = untagPointer(Ptr);
-      Callback(Ptr);
-    }
-  }
-
-  bool canCache(uptr Size) { return Cache.canCache(Size); }
-
-  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
-
-  void releaseToOS() { Cache.releaseToOS(); }
-
-  void disableMemoryTagging() { Cache.disableMemoryTagging(); }
-
-  void unmapTestOnly() { Cache.unmapTestOnly(); }
-
-  void getStats(ScopedString *Str);
-
-private:
-  typename Config::Secondary::template CacheT<Config> Cache;
-
-  mutable HybridMutex Mutex;
-  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
-  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
-  uptr FreedBytes GUARDED_BY(Mutex) = 0;
-  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
-  uptr LargestSize GUARDED_BY(Mutex) = 0;
-  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
-  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
-  LocalStats Stats GUARDED_BY(Mutex);
-};
-
-// As with the Primary, the size passed to this function includes any desired
-// alignment, so that the frontend can align the user allocation. The hint
-// parameter allows us to unmap spurious memory when dealing with larger
-// (greater than a page) alignments on 32-bit platforms.
-// Due to the sparsity of address space available on those platforms, requesting
-// an allocation from the Secondary with a large alignment would end up wasting
-// VA space (even though we are not committing the whole thing), hence the need
-// to trim off some of the reserved space.
-// For allocations requested with an alignment greater than or equal to a page,
-// the committed memory will amount to something close to Size - AlignmentHint
-// (pending rounding and headers).
-template <typename Config>
-void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
-                                     uptr Alignment, uptr *BlockEndPtr,
-                                     FillContentsMode FillContents) {
-  if (Options.get(OptionBit::AddLargeAllocationSlack))
-    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
-  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
-  const uptr PageSize = getPageSizeCached();
-
-  // Note that cached blocks may already have an aligned address. Thus we
-  // simply pass the required size (`Size` + `getHeadersSize()`) for the cache
-  // lookup.
-  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
-
-  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
-    LargeBlock::Header *H;
-    bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
-                       &Zeroed)) {
-      const uptr BlockEnd = H->CommitBase + H->CommitSize;
-      if (BlockEndPtr)
-        *BlockEndPtr = BlockEnd;
-      uptr HInt = reinterpret_cast<uptr>(H);
-      if (allocatorSupportsMemoryTagging<Config>())
-        HInt = untagPointer(HInt);
-      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
-      void *Ptr = reinterpret_cast<void *>(PtrInt);
-      if (FillContents && !Zeroed)
-        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
-               BlockEnd - PtrInt);
-      {
-        ScopedLock L(Mutex);
-        InUseBlocks.push_back(H);
-        AllocatedBytes += H->CommitSize;
-        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
-        NumberOfAllocs++;
-        Stats.add(StatAllocated, H->CommitSize);
-        Stats.add(StatMapped, H->MemMap.getCapacity());
-      }
-      return Ptr;
-    }
-  }
-
-  uptr RoundedSize =
-      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
-  if (Alignment > PageSize)
-    RoundedSize += Alignment - PageSize;
-
-  ReservedMemoryT ReservedMemory;
-  const uptr MapSize = RoundedSize + 2 * PageSize;
-  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
-                                      MAP_ALLOWNOMEM))) {
-    return nullptr;
-  }
-
-  // Take full ownership of the reserved region.
-  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
-                                           ReservedMemory.getCapacity());
-  uptr MapBase = MemMap.getBase();
-  uptr CommitBase = MapBase + PageSize;
-  uptr MapEnd = MapBase + MapSize;
-
-  // In the unlikely event of alignments larger than a page, adjust the amount
-  // of memory we want to commit, and trim the extra memory.
-  if (UNLIKELY(Alignment >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (e.g.
-    // the pointer returned by the C or C++ allocation APIs) ends up on a page
-    // boundary, and our headers will live in the preceding page.
-    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
-    const uptr NewMapBase = CommitBase - PageSize;
-    DCHECK_GE(NewMapBase, MapBase);
-    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
-    // are less constrained memory wise, and that saves us two syscalls.
-    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      MemMap.unmap(MapBase, NewMapBase - MapBase);
-      MapBase = NewMapBase;
-    }
-    const uptr NewMapEnd =
-        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
-    DCHECK_LE(NewMapEnd, MapEnd);
-    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
-      MapEnd = NewMapEnd;
-    }
-  }
-
-  const uptr CommitSize = MapEnd - PageSize - CommitBase;
-  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
-  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
-                            MemMap)) {
-    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-    return nullptr;
-  }
-  const uptr HeaderPos = AllocPos - getHeadersSize();
-  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
-      LargeBlock::addHeaderTag<Config>(HeaderPos));
-  if (useMemoryTagging<Config>(Options))
-    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
-              reinterpret_cast<uptr>(H + 1));
-  H->CommitBase = CommitBase;
-  H->CommitSize = CommitSize;
-  H->MemMap = MemMap;
-  if (BlockEndPtr)
-    *BlockEndPtr = CommitBase + CommitSize;
-  {
-    ScopedLock L(Mutex);
-    InUseBlocks.push_back(H);
-    AllocatedBytes += CommitSize;
-    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
-    if (LargestSize < CommitSize)
-      LargestSize = CommitSize;
-    NumberOfAllocs++;
-    Stats.add(StatAllocated, CommitSize);
-    Stats.add(StatMapped, H->MemMap.getCapacity());
-  }
-  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
-}
-
-template <typename Config>
-void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
-    EXCLUDES(Mutex) {
-  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
-  const uptr CommitSize = H->CommitSize;
-  {
-    ScopedLock L(Mutex);
-    InUseBlocks.remove(H);
-    FreedBytes += CommitSize;
-    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
-    NumberOfFrees++;
-    Stats.sub(StatAllocated, CommitSize);
-    Stats.sub(StatMapped, H->MemMap.getCapacity());
-  }
-  Cache.store(Options, H);
-}
-
-template <typename Config>
-void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
-  ScopedLock L(Mutex);
-  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
-              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
-              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
-              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
-              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
-              FragmentedBytes >> 10);
-  Cache.getStats(Str);
-}
-
-} // namespace scudo
-
-#endif // SCUDO_SECONDARY_H_
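To make the placement arithmetic of `allocate()` and `retrieve()` concrete, here is a self-contained sketch with hypothetical numbers — the constants are illustrative, only the formulas come from the code above:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

static uptr roundDown(uptr X, uptr Boundary) { return X & ~(Boundary - 1); }

int main() {
  // Hypothetical committed region and request. allocate() places the block as
  // high as possible inside [CommitBase, CommitBase + CommitSize), so any
  // slack stays at the front of the region.
  const uptr CommitBase = 0x10000, CommitSize = 0x8000;
  const uptr Size = 0x3000, Alignment = 64, HeadersSize = 80;

  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeadersSize;
  assert(HeaderPos >= CommitBase); // otherwise the block doesn't fit

  // retrieve() reuses a cached block immediately when the slack in front of
  // the headers is within ~10% of the usable space (FragmentedBytesDivisor);
  // otherwise it keeps scanning for the candidate with the smallest slack.
  const uptr Slack = HeaderPos - CommitBase;
  const uptr Budget = (CommitBase + CommitSize - HeaderPos) / 10;
  printf("AllocPos=%#zx HeaderPos=%#zx slack=%#zx budget=%#zx\n",
         (size_t)AllocPos, (size_t)HeaderPos, (size_t)Slack, (size_t)Budget);
}
```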
diff --git a/Telegram/ThirdParty/scudo/size_class_map.h b/Telegram/ThirdParty/scudo/size_class_map.h
deleted file mode 100644
index 4138885de..000000000
--- a/Telegram/ThirdParty/scudo/size_class_map.h
+++ /dev/null
@@ -1,353 +0,0 @@
-//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_SIZE_CLASS_MAP_H_
-#define SCUDO_SIZE_CLASS_MAP_H_
-
-#include "chunk.h"
-#include "common.h"
-#include "string_utils.h"
-
-namespace scudo {
-
-inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
-  const uptr L = getMostSignificantSetBitIndex(Size);
-  const uptr LBits = (Size >> (L - LogBits)) - (1 << LogBits);
-  const uptr HBits = (L - ZeroLog) << LogBits;
-  return LBits + HBits;
-}
-
-template <typename Config> struct SizeClassMapBase {
-  static u16 getMaxCachedHint(uptr Size) {
-    DCHECK_NE(Size, 0);
-    u32 N;
-    // Force a 32-bit division if the template parameters allow for it.
-    if (Config::MaxBytesCachedLog > 31 || Config::MaxSizeLog > 31)
-      N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size);
-    else
-      N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size);
-
-    // Note that Config::MaxNumCachedHint is u16 so the result is guaranteed to
-    // fit in u16.
-    return static_cast<u16>(Max(1U, Min<u32>(Config::MaxNumCachedHint, N)));
-  }
-};
-
-// SizeClassMap maps allocation sizes into size classes and back, in an
-// efficient table-free manner.
-//
-// Class 0 is a special class that doesn't abide by the same rules as other
-// classes. The allocator uses it to hold batches.
-//
-// The other sizes are controlled by the template parameters:
-// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
-// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
-// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
-//               2^MidSizeLog bytes.
-// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
-//            eg. with NumBits==3 all size classes after 2^MidSizeLog look like
-//            0b1xx0..0 (where x is either 0 or 1).
-//
-// This class also gives a hint to a thread-caching allocator about the amount
-// of chunks that can be cached per-thread:
-// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
-// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
-template <typename Config>
-class FixedSizeClassMap : public SizeClassMapBase<Config> {
-  typedef SizeClassMapBase<Config> Base;
-
-  static const uptr MinSize = 1UL << Config::MinSizeLog;
-  static const uptr MidSize = 1UL << Config::MidSizeLog;
-  static const uptr MidClass = MidSize / MinSize;
-  static const u8 S = Config::NumBits - 1;
-  static const uptr M = (1UL << S) - 1;
-
-public:
-  static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
-
-  static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
-  static const uptr NumClasses =
-      MidClass + ((Config::MaxSizeLog - Config::MidSizeLog) << S) + 1;
-  static_assert(NumClasses <= 256, "");
-  static const uptr LargestClassId = NumClasses - 1;
-  static const uptr BatchClassId = 0;
-
-  static uptr getSizeByClassId(uptr ClassId) {
-    DCHECK_NE(ClassId, BatchClassId);
-    if (ClassId <= MidClass)
-      return (ClassId << Config::MinSizeLog) + Config::SizeDelta;
-    ClassId -= MidClass;
-    const uptr T = MidSize << (ClassId >> S);
-    return T + (T >> S) * (ClassId & M) + Config::SizeDelta;
-  }
-
-  static u8 getSizeLSBByClassId(uptr ClassId) {
-    return u8(getLeastSignificantSetBitIndex(getSizeByClassId(ClassId)));
-  }
-
-  static constexpr bool usesCompressedLSBFormat() { return false; }
-
-  static uptr getClassIdBySize(uptr Size) {
-    if (Size <= Config::SizeDelta + (1 << Config::MinSizeLog))
-      return 1;
-    Size -= Config::SizeDelta;
-    DCHECK_LE(Size, MaxSize);
-    if (Size <= MidSize)
-      return (Size + MinSize - 1) >> Config::MinSizeLog;
-    return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
-  }
-
-  static u16 getMaxCachedHint(uptr Size) {
-    DCHECK_LE(Size, MaxSize);
-    return Base::getMaxCachedHint(Size);
-  }
-};
-
-template <typename Config>
-class TableSizeClassMap : public SizeClassMapBase<Config> {
-  typedef SizeClassMapBase<Config> Base;
-
-  static const u8 S = Config::NumBits - 1;
-  static const uptr M = (1UL << S) - 1;
-  static const uptr ClassesSize =
-      sizeof(Config::Classes) / sizeof(Config::Classes[0]);
-
-  struct SizeTable {
-    constexpr SizeTable() {
-      uptr Pos = 1 << Config::MidSizeLog;
-      uptr Inc = 1 << (Config::MidSizeLog - S);
-      for (uptr i = 0; i != getTableSize(); ++i) {
-        Pos += Inc;
-        if ((Pos & (Pos - 1)) == 0)
-          Inc *= 2;
-        Tab[i] = computeClassId(Pos + Config::SizeDelta);
-      }
-    }
-
-    constexpr static u8 computeClassId(uptr Size) {
-      for (uptr i = 0; i != ClassesSize; ++i) {
-        if (Size <= Config::Classes[i])
-          return static_cast<u8>(i + 1);
-      }
-      return static_cast<u8>(-1);
-    }
-
-    constexpr static uptr getTableSize() {
-      return (Config::MaxSizeLog - Config::MidSizeLog) << S;
-    }
-
-    u8 Tab[getTableSize()] = {};
-  };
-
-  static constexpr SizeTable SzTable = {};
-
-  struct LSBTable {
-    constexpr LSBTable() {
-      u8 Min = 255, Max = 0;
-      for (uptr I = 0; I != ClassesSize; ++I) {
-        for (u8 Bit = 0; Bit != 64; ++Bit) {
-          if (Config::Classes[I] & (1 << Bit)) {
-            Tab[I] = Bit;
-            if (Bit < Min)
-              Min = Bit;
-            if (Bit > Max)
-              Max = Bit;
-            break;
-          }
-        }
-      }
-
-      if (Max - Min > 3 || ClassesSize > 32)
-        return;
-
-      UseCompressedFormat = true;
-      CompressedMin = Min;
-      for (uptr I = 0; I != ClassesSize; ++I)
-        CompressedValue |= u64(Tab[I] - Min) << (I * 2);
-    }
-
-    u8 Tab[ClassesSize] = {};
-
-    bool UseCompressedFormat = false;
-    u8 CompressedMin = 0;
-    u64 CompressedValue = 0;
-  };
-
-  static constexpr LSBTable LTable = {};
-
-public:
-  static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
-
-  static const uptr NumClasses = ClassesSize + 1;
-  static_assert(NumClasses < 256, "");
-  static const uptr LargestClassId = NumClasses - 1;
-  static const uptr BatchClassId = 0;
-  static const uptr MaxSize = Config::Classes[LargestClassId - 1];
-
-  static uptr getSizeByClassId(uptr ClassId) {
-    return Config::Classes[ClassId - 1];
-  }
-
-  static u8 getSizeLSBByClassId(uptr ClassId) {
-    if (LTable.UseCompressedFormat)
-      return ((LTable.CompressedValue >> ((ClassId - 1) * 2)) & 3) +
-             LTable.CompressedMin;
-    else
-      return LTable.Tab[ClassId - 1];
-  }
-
-  static constexpr bool usesCompressedLSBFormat() {
-    return LTable.UseCompressedFormat;
-  }
-
-  static uptr getClassIdBySize(uptr Size) {
-    if (Size <= Config::Classes[0])
-      return 1;
-    Size -= Config::SizeDelta;
-    DCHECK_LE(Size, MaxSize);
-    if (Size <= (1 << Config::MidSizeLog))
-      return ((Size - 1) >> Config::MinSizeLog) + 1;
-    return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
-  }
-
-  static u16 getMaxCachedHint(uptr Size) {
-    DCHECK_LE(Size, MaxSize);
-    return Base::getMaxCachedHint(Size);
-  }
-};
-
-struct DefaultSizeClassConfig {
-  static const uptr NumBits = 3;
-  static const uptr MinSizeLog = 5;
-  static const uptr MidSizeLog = 8;
-  static const uptr MaxSizeLog = 17;
-  static const u16 MaxNumCachedHint = 14;
-  static const uptr MaxBytesCachedLog = 10;
-  static const uptr SizeDelta = 0;
-};
-
-typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap;
-
-struct FuchsiaSizeClassConfig {
-  static const uptr NumBits = 3;
-  static const uptr MinSizeLog = 5;
-  static const uptr MidSizeLog = 8;
-  static const uptr MaxSizeLog = 17;
-  static const u16 MaxNumCachedHint = 12;
-  static const uptr MaxBytesCachedLog = 10;
-  static const uptr SizeDelta = Chunk::getHeaderSize();
-};
-
-typedef FixedSizeClassMap<FuchsiaSizeClassConfig> FuchsiaSizeClassMap;
-
-struct AndroidSizeClassConfig {
-#if SCUDO_WORDSIZE == 64U
-  static const uptr NumBits = 7;
-  static const uptr MinSizeLog = 4;
-  static const uptr MidSizeLog = 6;
-  static const uptr MaxSizeLog = 16;
-  static const u16 MaxNumCachedHint = 13;
-  static const uptr MaxBytesCachedLog = 13;
-
-  static constexpr uptr Classes[] = {
-      0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
-      0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
-      0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
-      0x02d90, 0x03790, 0x04010, 0x04810, 0x05a10, 0x07310, 0x08210, 0x10010,
-  };
-  static const uptr SizeDelta = 16;
-#else
-  static const uptr NumBits = 8;
-  static const uptr MinSizeLog = 4;
-  static const uptr MidSizeLog = 7;
-  static const uptr MaxSizeLog = 16;
-  static const u16 MaxNumCachedHint = 14;
-  static const uptr MaxBytesCachedLog = 13;
-
-  static constexpr uptr Classes[] = {
-      0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
-      0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
-      0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
-      0x00330, 0x00370, 0x003a0, 0x00400, 0x00430, 0x004a0, 0x00530, 0x00610,
-      0x00730, 0x00840, 0x00910, 0x009c0, 0x00a60, 0x00b10, 0x00ca0, 0x00e00,
-      0x00fb0, 0x01030, 0x01130, 0x011f0, 0x01490, 0x01650, 0x01930, 0x02010,
-      0x02190, 0x02490, 0x02850, 0x02d50, 0x03010, 0x03210, 0x03c90, 0x04090,
-      0x04510, 0x04810, 0x05c10, 0x06f10, 0x07310, 0x08010, 0x0c010, 0x10010,
-  };
-  static const uptr SizeDelta = 16;
-#endif
-};
-
-typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
-
-#if SCUDO_WORDSIZE == 64U && defined(__clang__)
-static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
-#endif
-
-struct TrustySizeClassConfig {
-  static const uptr NumBits = 1;
-  static const uptr MinSizeLog = 5;
-  static const uptr MidSizeLog = 5;
-  static const uptr MaxSizeLog = 15;
-  static const u16 MaxNumCachedHint = 12;
-  static const uptr MaxBytesCachedLog = 10;
-  static const uptr SizeDelta = 0;
-};
-
-typedef FixedSizeClassMap<TrustySizeClassConfig> TrustySizeClassMap;
-
-template <typename SCMap> inline void printMap() {
-  ScopedString Buffer;
-  uptr PrevS = 0;
-  uptr TotalCached = 0;
-  for (uptr I = 0; I < SCMap::NumClasses; I++) {
-    if (I == SCMap::BatchClassId)
-      continue;
-    const uptr S = SCMap::getSizeByClassId(I);
-    const uptr D = S - PrevS;
-    const uptr P = PrevS ? (D * 100 / PrevS) : 0;
-    const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
-    const uptr Cached = SCMap::getMaxCachedHint(S) * S;
-    Buffer.append(
-        "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %u %zu; id %zu\n", I,
-        S, D, P, L, SCMap::getMaxCachedHint(S), Cached,
-        SCMap::getClassIdBySize(S));
-    TotalCached += Cached;
-    PrevS = S;
-  }
-  Buffer.append("Total Cached: %zu\n", TotalCached);
-  Buffer.output();
-}
-
-template <typename SCMap> static UNUSED void validateMap() {
-  for (uptr C = 0; C < SCMap::NumClasses; C++) {
-    if (C == SCMap::BatchClassId)
-      continue;
-    const uptr S = SCMap::getSizeByClassId(C);
-    CHECK_NE(S, 0U);
-    CHECK_EQ(SCMap::getClassIdBySize(S), C);
-    if (C < SCMap::LargestClassId)
-      CHECK_EQ(SCMap::getClassIdBySize(S + 1), C + 1);
-    CHECK_EQ(SCMap::getClassIdBySize(S - 1), C);
-    if (C - 1 != SCMap::BatchClassId)
-      CHECK_GT(SCMap::getSizeByClassId(C), SCMap::getSizeByClassId(C - 1));
-  }
-  // Do not perform the loop if the maximum size is too large.
-  if (SCMap::MaxSize > (1 << 19))
-    return;
-  for (uptr S = 1; S <= SCMap::MaxSize; S++) {
-    const uptr C = SCMap::getClassIdBySize(S);
-    CHECK_LT(C, SCMap::NumClasses);
-    CHECK_GE(SCMap::getSizeByClassId(C), S);
-    if (C - 1 != SCMap::BatchClassId)
-      CHECK_LT(SCMap::getSizeByClassId(C - 1), S);
-  }
-}
-} // namespace scudo
-
-#endif // SCUDO_SIZE_CLASS_MAP_H_
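A standalone sketch of the `FixedSizeClassMap` size computation, instantiated by hand for the `DefaultSizeClassConfig` shown above (class 0, the batch class, is skipped):

```cpp
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Mirrors getSizeByClassId() for NumBits=3, MinSizeLog=5, MidSizeLog=8,
// SizeDelta=0: classes step linearly by 32 bytes up to 256, then four
// log-spaced classes per power of two (sizes of the form 0b1xx0...0).
static uptr sizeOfClass(uptr ClassId) {
  const uptr MinSizeLog = 5, MidSizeLog = 8;
  const uptr MinSize = uptr(1) << MinSizeLog; // 32
  const uptr MidSize = uptr(1) << MidSizeLog; // 256
  const uptr MidClass = MidSize / MinSize;    // 8
  const uptr S = 3 - 1;                       // NumBits - 1
  const uptr M = (uptr(1) << S) - 1;
  if (ClassId <= MidClass)
    return ClassId << MinSizeLog;             // 32, 64, ..., 256
  ClassId -= MidClass;
  const uptr T = MidSize << (ClassId >> S);
  return T + (T >> S) * (ClassId & M);        // 320, 384, 448, 512, ...
}

int main() {
  for (uptr C = 1; C <= 16; C++)
    printf("class %2zu -> %zu bytes\n", (size_t)C, (size_t)sizeOfClass(C));
}
```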
diff --git a/Telegram/ThirdParty/scudo/stack_depot.h b/Telegram/ThirdParty/scudo/stack_depot.h
deleted file mode 100644
index 12c35eb2a..000000000
--- a/Telegram/ThirdParty/scudo/stack_depot.h
+++ /dev/null
@@ -1,143 +0,0 @@
-//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_STACK_DEPOT_H_
-#define SCUDO_STACK_DEPOT_H_
-
-#include "atomic_helpers.h"
-#include "mutex.h"
-
-namespace scudo {
-
-class MurMur2HashBuilder {
-  static const u32 M = 0x5bd1e995;
-  static const u32 Seed = 0x9747b28c;
-  static const u32 R = 24;
-  u32 H;
-
-public:
-  explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
-  void add(u32 K) {
-    K *= M;
-    K ^= K >> R;
-    K *= M;
-    H *= M;
-    H ^= K;
-  }
-  u32 get() {
-    u32 X = H;
-    X ^= X >> 13;
-    X *= M;
-    X ^= X >> 15;
-    return X;
-  }
-};
-
-class StackDepot {
-  HybridMutex RingEndMu;
-  u32 RingEnd = 0;
-
-  // This data structure stores a stack trace for each allocation and
-  // deallocation when stack trace recording is enabled; a trace may then be
-  // looked up using a hash of the stack trace. The lower bits of the hash are
-  // an index into the Tab array, which stores an index into the Ring array
-  // where the stack traces are stored. As the name implies, Ring is a ring
-  // buffer, so a stack trace may wrap around to the start of the array.
-  //
-  // Each stack trace in Ring is prefixed by a stack trace marker consisting of
-  // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
-  // and stack trace markers in the case where instruction pointers are 4-byte
-  // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
-  // size of the stack trace in bits 33-63.
-  //
-  // The insert() function is potentially racy in its accesses to the Tab and
-  // Ring arrays, but find() is resilient to races in the sense that, barring
-  // hash collisions, it will either return the correct stack trace or no stack
-  // trace at all, even if two instances of insert() raced with one another.
-  // This is achieved by re-checking the hash of the stack trace before
-  // returning the trace.
-
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr TabBits = 4;
-#else
-  static const uptr TabBits = 16;
-#endif
-  static const uptr TabSize = 1 << TabBits;
-  static const uptr TabMask = TabSize - 1;
-  atomic_u32 Tab[TabSize] = {};
-
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr RingBits = 4;
-#else
-  static const uptr RingBits = 19;
-#endif
-  static const uptr RingSize = 1 << RingBits;
-  static const uptr RingMask = RingSize - 1;
-  atomic_u64 Ring[RingSize] = {};
-
-public:
-  // Insert hash of the stack trace [Begin, End) into the stack depot, and
-  // return the hash.
-  u32 insert(uptr *Begin, uptr *End) {
-    MurMur2HashBuilder B;
-    for (uptr *I = Begin; I != End; ++I)
-      B.add(u32(*I) >> 2);
-    u32 Hash = B.get();
-
-    u32 Pos = Hash & TabMask;
-    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
-    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
-    u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
-    if (Entry == Id)
-      return Hash;
-
-    ScopedLock Lock(RingEndMu);
-    RingPos = RingEnd;
-    atomic_store_relaxed(&Tab[Pos], RingPos);
-    atomic_store_relaxed(&Ring[RingPos], Id);
-    for (uptr *I = Begin; I != End; ++I) {
-      RingPos = (RingPos + 1) & RingMask;
-      atomic_store_relaxed(&Ring[RingPos], *I);
-    }
-    RingEnd = (RingPos + 1) & RingMask;
-    return Hash;
-  }
-
-  // Look up a stack trace by hash. Returns true if successful. The trace may be
-  // accessed via operator[] passing indexes between *RingPosPtr and
-  // *RingPosPtr + *SizePtr.
-  bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
-    u32 Pos = Hash & TabMask;
-    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
-    if (RingPos >= RingSize)
-      return false;
-    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
-    u64 HashWithTagBit = (u64(Hash) << 1) | 1;
-    if ((Entry & 0x1ffffffff) != HashWithTagBit)
-      return false;
-    u32 Size = u32(Entry >> 33);
-    if (Size >= RingSize)
-      return false;
-    *RingPosPtr = (RingPos + 1) & RingMask;
-    *SizePtr = Size;
-    MurMur2HashBuilder B;
-    for (uptr I = 0; I != Size; ++I) {
-      RingPos = (RingPos + 1) & RingMask;
-      B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
-    }
-    return B.get() == Hash;
-  }
-
-  u64 operator[](uptr RingPos) const {
-    return atomic_load_relaxed(&Ring[RingPos & RingMask]);
-  }
-};
-
-} // namespace scudo
-
-#endif // SCUDO_STACK_DEPOT_H_
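The comment above describes packing three fields into a single 64-bit ring entry; a minimal sketch of that marker layout and its extraction:

```cpp
#include <cassert>
#include <cstdint>

// Marker layout from the deleted StackDepot: bit 0 is a fixed 1 (so markers
// can be told apart from 4-byte-aligned frame addresses), bits 1-32 hold the
// 32-bit trace hash, and bits 33-63 hold the trace size.
static uint64_t packMarker(uint32_t Hash, uint64_t Size) {
  return (Size << 33) | (uint64_t(Hash) << 1) | 1;
}

static uint32_t markerHash(uint64_t Entry) {
  return uint32_t((Entry >> 1) & 0xffffffff);
}

static uint64_t markerSize(uint64_t Entry) { return Entry >> 33; }

int main() {
  const uint64_t Id = packMarker(0xdeadbeef, 12);
  assert(Id & 1);                       // marker bit set
  assert(markerHash(Id) == 0xdeadbeef); // hash round-trips
  assert(markerSize(Id) == 12);         // size round-trips
  // find() compares the low 33 bits against (Hash << 1) | 1, exactly the
  // hash-plus-tag portion of this layout.
  assert((Id & 0x1ffffffff) == ((uint64_t(0xdeadbeefu) << 1) | 1));
}
```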
diff --git a/Telegram/ThirdParty/scudo/stats.h b/Telegram/ThirdParty/scudo/stats.h
deleted file mode 100644
index 658b75863..000000000
--- a/Telegram/ThirdParty/scudo/stats.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//===-- stats.h -------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_STATS_H_
-#define SCUDO_STATS_H_
-
-#include "atomic_helpers.h"
-#include "list.h"
-#include "mutex.h"
-#include "thread_annotations.h"
-
-#include <string.h>
-
-namespace scudo {
-
-// Memory allocator statistics
-enum StatType { StatAllocated, StatFree, StatMapped, StatCount };
-
-typedef uptr StatCounters[StatCount];
-
-// Per-thread stats, living in the per-thread cache. We use atomics so that
-// the numbers themselves are consistent. But we don't use atomic_{add|sub} or
-// a lock, because those are expensive operations, and we only need the stats
-// to be "somewhat" correct: e.g. if we call GlobalStats::get while a thread
-// is LocalStats::add'ing, this is OK, we will still get a meaningful number.
-class LocalStats {
-public:
-  void init() {
-    for (uptr I = 0; I < StatCount; I++)
-      DCHECK_EQ(get(static_cast<StatType>(I)), 0U);
-  }
-
-  void add(StatType I, uptr V) {
-    V += atomic_load_relaxed(&StatsArray[I]);
-    atomic_store_relaxed(&StatsArray[I], V);
-  }
-
-  void sub(StatType I, uptr V) {
-    V = atomic_load_relaxed(&StatsArray[I]) - V;
-    atomic_store_relaxed(&StatsArray[I], V);
-  }
-
-  void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }
-
-  uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
-
-  LocalStats *Next = nullptr;
-  LocalStats *Prev = nullptr;
-
-private:
-  atomic_uptr StatsArray[StatCount] = {};
-};
-
-// Global stats, used for aggregation and querying.
-class GlobalStats : public LocalStats {
-public:
-  void init() { LocalStats::init(); }
-
-  void link(LocalStats *S) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    StatsList.push_back(S);
-  }
-
-  void unlink(LocalStats *S) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    StatsList.remove(S);
-    for (uptr I = 0; I < StatCount; I++)
-      add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
-  }
-
-  void get(uptr *S) const EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    for (uptr I = 0; I < StatCount; I++)
-      S[I] = LocalStats::get(static_cast<StatType>(I));
-    for (const auto &Stats : StatsList) {
-      for (uptr I = 0; I < StatCount; I++)
-        S[I] += Stats.get(static_cast<StatType>(I));
-    }
-    // All stats must be non-negative.
-    for (uptr I = 0; I < StatCount; I++)
-      S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
-  }
-
-  void lock() ACQUIRE(Mutex) { Mutex.lock(); }
-  void unlock() RELEASE(Mutex) { Mutex.unlock(); }
-
-  void disable() ACQUIRE(Mutex) { lock(); }
-  void enable() RELEASE(Mutex) { unlock(); }
-
-private:
-  mutable HybridMutex Mutex;
-  DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
-};
-
-} // namespace scudo
-
-#endif // SCUDO_STATS_H_
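A minimal sketch of the relaxed-atomics trade-off the comment above describes, written with std::atomic for illustration (scudo's atomic helpers are equivalent in spirit):

```cpp
#include <atomic>
#include <cstddef>

// Each counter is read and written with relaxed atomic operations, but the
// read-modify-write is not atomic as a whole: concurrent add()s may lose
// updates. That is accepted by design, because individual loads and stores
// can never tear, so readers always see a plausible value.
struct RacyCounter {
  std::atomic<size_t> V{0};

  void add(size_t N) {
    // Same shape as LocalStats::add: load_relaxed + store_relaxed, cheaper
    // than fetch_add on contended cache lines but racy by design.
    V.store(V.load(std::memory_order_relaxed) + N, std::memory_order_relaxed);
  }

  size_t get() const { return V.load(std::memory_order_relaxed); }
};

int main() {
  RacyCounter C;
  C.add(5);
  C.add(7);
  return C.get() == 12 ? 0 : 1;
}
```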
diff --git a/Telegram/ThirdParty/scudo/string_utils.cpp b/Telegram/ThirdParty/scudo/string_utils.cpp
deleted file mode 100644
index d4e4e3bec..000000000
--- a/Telegram/ThirdParty/scudo/string_utils.cpp
+++ /dev/null
@@ -1,277 +0,0 @@
-//===-- string_utils.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "string_utils.h"
-#include "common.h"
-
-#include <stdarg.h>
-#include <string.h>
-
-namespace scudo {
-
-static int appendChar(char **Buffer, const char *BufferEnd, char C) {
-  if (*Buffer < BufferEnd) {
-    **Buffer = C;
-    (*Buffer)++;
-  }
-  return 1;
-}
-
-// Appends a number in the given Base to the buffer. If its length is less
-// than |MinNumberLength|, it is padded with leading zeroes or spaces,
-// depending on the value of |PadWithZero|.
-static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
-                        u8 Base, u8 MinNumberLength, bool PadWithZero,
-                        bool Negative, bool Upper) {
-  constexpr uptr MaxLen = 30;
-  RAW_CHECK(Base == 10 || Base == 16);
-  RAW_CHECK(Base == 10 || !Negative);
-  RAW_CHECK(AbsoluteValue || !Negative);
-  RAW_CHECK(MinNumberLength < MaxLen);
-  int Res = 0;
-  if (Negative && MinNumberLength)
-    --MinNumberLength;
-  if (Negative && PadWithZero)
-    Res += appendChar(Buffer, BufferEnd, '-');
-  uptr NumBuffer[MaxLen];
-  int Pos = 0;
-  do {
-    RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
-                  "appendNumber buffer overflow");
-    NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
-    AbsoluteValue /= Base;
-  } while (AbsoluteValue > 0);
-  if (Pos < MinNumberLength) {
-    memset(&NumBuffer[Pos], 0,
-           sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
-    Pos = MinNumberLength;
-  }
-  RAW_CHECK(Pos > 0);
-  Pos--;
-  for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
-    char c = (PadWithZero || Pos == 0) ? '0' : ' ';
-    Res += appendChar(Buffer, BufferEnd, c);
-  }
-  if (Negative && !PadWithZero)
-    Res += appendChar(Buffer, BufferEnd, '-');
-  for (; Pos >= 0; Pos--) {
-    char Digit = static_cast<char>(NumBuffer[Pos]);
-    Digit = static_cast<char>((Digit < 10) ? '0' + Digit
-                                           : (Upper ? 'A' : 'a') + Digit - 10);
-    Res += appendChar(Buffer, BufferEnd, Digit);
-  }
-  return Res;
-}
-
-static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
-                          u8 Base, u8 MinNumberLength, bool PadWithZero,
-                          bool Upper) {
-  return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
-                      PadWithZero, /*Negative=*/false, Upper);
-}
-
-static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
-                               u8 MinNumberLength, bool PadWithZero) {
-  const bool Negative = (Num < 0);
-  const u64 UnsignedNum = (Num == INT64_MIN)
-                              ? static_cast<u64>(INT64_MAX) + 1
-                              : static_cast<u64>(Negative ? -Num : Num);
-  return appendNumber(Buffer, BufferEnd, UnsignedNum, 10, MinNumberLength,
-                      PadWithZero, Negative, /*Upper=*/false);
-}
-
-// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
-// interpret Width == 0 as "no Width requested":
-// Width == 0 - no Width requested
-// Width  < 0 - left-justify S within and pad it to -Width chars, if necessary
-// Width  > 0 - right-justify S, not implemented yet
-static int appendString(char **Buffer, const char *BufferEnd, int Width,
-                        int MaxChars, const char *S) {
-  if (!S)
-    S = "<null>";
-  int Res = 0;
-  for (; *S; S++) {
-    if (MaxChars >= 0 && Res >= MaxChars)
-      break;
-    Res += appendChar(Buffer, BufferEnd, *S);
-  }
-  // Only left-justified strings are supported.
-  while (Width < -Res)
-    Res += appendChar(Buffer, BufferEnd, ' ');
-  return Res;
-}
-
-static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
-  int Res = 0;
-  Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
-  Res += appendUnsigned(Buffer, BufferEnd, ptr_value, 16,
-                        SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
-                        /*Upper=*/false);
-  return Res;
-}
-
-static int formatString(char *Buffer, uptr BufferLength, const char *Format,
-                        va_list Args) {
-  static const char *PrintfFormatsHelp =
-      "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
-      "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
-  RAW_CHECK(Format);
-  RAW_CHECK(BufferLength > 0);
-  const char *BufferEnd = &Buffer[BufferLength - 1];
-  const char *Cur = Format;
-  int Res = 0;
-  for (; *Cur; Cur++) {
-    if (*Cur != '%') {
-      Res += appendChar(&Buffer, BufferEnd, *Cur);
-      continue;
-    }
-    Cur++;
-    const bool LeftJustified = *Cur == '-';
-    if (LeftJustified)
-      Cur++;
-    bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
-    const bool PadWithZero = (*Cur == '0');
-    u8 Width = 0;
-    if (HaveWidth) {
-      while (*Cur >= '0' && *Cur <= '9')
-        Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
-    }
-    const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
-    int Precision = -1;
-    if (HavePrecision) {
-      Cur += 2;
-      Precision = va_arg(Args, int);
-    }
-    const bool HaveZ = (*Cur == 'z');
-    Cur += HaveZ;
-    const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
-    Cur += HaveLL * 2;
-    s64 DVal;
-    u64 UVal;
-    const bool HaveLength = HaveZ || HaveLL;
-    const bool HaveFlags = HaveWidth || HaveLength;
-    // At the moment only %s supports precision and left-justification.
-    CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
-    switch (*Cur) {
-    case 'd': {
-      DVal = HaveLL  ? va_arg(Args, s64)
-             : HaveZ ? va_arg(Args, sptr)
-                     : va_arg(Args, int);
-      Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
-      break;
-    }
-    case 'u':
-    case 'x':
-    case 'X': {
-      UVal = HaveLL  ? va_arg(Args, u64)
-             : HaveZ ? va_arg(Args, uptr)
-                     : va_arg(Args, unsigned);
-      const bool Upper = (*Cur == 'X');
-      Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
-                            Width, PadWithZero, Upper);
-      break;
-    }
-    case 'p': {
-      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
-      Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
-      break;
-    }
-    case 's': {
-      RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
-      // Only left-justified Width is supported.
-      CHECK(!HaveWidth || LeftJustified);
-      Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
-                          Precision, va_arg(Args, char *));
-      break;
-    }
-    case 'c': {
-      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
-      Res +=
-          appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
-      break;
-    }
-    // In Scudo, `s64`/`u64` are supposed to use `lld` and `llu` respectively.
-    // However, `-Wformat` doesn't know we have a different parser for those
-    // placeholders and keeps complaining about a type mismatch on 64-bit
-    // platforms, which use `ld`/`lu` for `s64`/`u64`. Therefore, to silence
-    // the warning, we use `PRId64`/`PRIu64` for printing `s64`/`u64` and
-    // handle the resulting `ld`/`lu` here.
-    case 'l': {
-      ++Cur;
-      RAW_CHECK(*Cur == 'd' || *Cur == 'u');
-
-      if (*Cur == 'd') {
-        DVal = va_arg(Args, s64);
-        Res +=
-            appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
-      } else {
-        UVal = va_arg(Args, u64);
-        Res += appendUnsigned(&Buffer, BufferEnd, UVal, 10, Width, PadWithZero,
-                              false);
-      }
-
-      break;
-    }
-    case '%': {
-      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
-      Res += appendChar(&Buffer, BufferEnd, '%');
-      break;
-    }
-    default: {
-      RAW_CHECK_MSG(false, PrintfFormatsHelp);
-    }
-    }
-  }
-  RAW_CHECK(Buffer <= BufferEnd);
-  appendChar(&Buffer, BufferEnd + 1, '\0');
-  return Res;
-}
-
-int formatString(char *Buffer, uptr BufferLength, const char *Format, ...) {
-  va_list Args;
-  va_start(Args, Format);
-  int Res = formatString(Buffer, BufferLength, Format, Args);
-  va_end(Args);
-  return Res;
-}
-
-void ScopedString::vappend(const char *Format, va_list Args) {
-  va_list ArgsCopy;
-  va_copy(ArgsCopy, Args);
-  // formatString doesn't currently support a null buffer or zero buffer length,
-  // so in order to get the resulting formatted string length, we use a one-char
-  // buffer.
-  char C[1];
-  const uptr AdditionalLength =
-      static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
-  const uptr Length = length();
-  String.resize(Length + AdditionalLength);
-  const uptr FormattedLength = static_cast<uptr>(formatString(
-      String.data() + Length, String.size() - Length, Format, ArgsCopy));
-  RAW_CHECK(data()[length()] == '\0');
-  RAW_CHECK(FormattedLength + 1 == AdditionalLength);
-  va_end(ArgsCopy);
-}
-
-void ScopedString::append(const char *Format, ...) {
-  va_list Args;
-  va_start(Args, Format);
-  vappend(Format, Args);
-  va_end(Args);
-}
-
-void Printf(const char *Format, ...) {
-  va_list Args;
-  va_start(Args, Format);
-  ScopedString Msg;
-  Msg.vappend(Format, Args);
-  outputRaw(Msg.data());
-  va_end(Args);
-}
-
-} // namespace scudo
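Annotation: ScopedString::vappend above measures first with a one-char probe,
then formats into reserved space. A minimal standalone sketch of the same
two-pass pattern, using the standard vsnprintf instead of Scudo's formatString
(appendFormatted and its names are invented for the example):

#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <string>

static void appendFormatted(std::string &Out, const char *Format, ...) {
  va_list Args;
  va_list ArgsCopy;
  va_start(Args, Format);
  va_copy(ArgsCopy, Args);
  // First pass: measure. vsnprintf reports the length the full output would
  // have had, even when the buffer is too small (a one-char probe, mirroring
  // the one-char buffer trick in vappend above).
  char Probe[1];
  const int Needed = std::vsnprintf(Probe, sizeof(Probe), Format, Args);
  va_end(Args);
  if (Needed >= 0) {
    // Second pass: format into space reserved at the end of the string.
    const size_t Old = Out.size();
    Out.resize(Old + static_cast<size_t>(Needed) + 1);
    std::vsnprintf(&Out[Old], static_cast<size_t>(Needed) + 1, Format,
                   ArgsCopy);
    Out.resize(Old + static_cast<size_t>(Needed)); // drop the trailing '\0'
  }
  va_end(ArgsCopy);
}

int main() {
  std::string S;
  appendFormatted(S, "%s: %d", "chunks", 42);
  std::printf("%s\n", S.c_str()); // prints "chunks: 42"
}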
diff --git a/Telegram/ThirdParty/scudo/string_utils.h b/Telegram/ThirdParty/scudo/string_utils.h
deleted file mode 100644
index a4cab5268..000000000
--- a/Telegram/ThirdParty/scudo/string_utils.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- string_utils.h ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_STRING_UTILS_H_
-#define SCUDO_STRING_UTILS_H_
-
-#include "internal_defs.h"
-#include "vector.h"
-
-#include <stdarg.h>
-
-namespace scudo {
-
-class ScopedString {
-public:
-  explicit ScopedString() { String.push_back('\0'); }
-  uptr length() { return String.size() - 1; }
-  const char *data() { return String.data(); }
-  void clear() {
-    String.clear();
-    String.push_back('\0');
-  }
-  void vappend(const char *Format, va_list Args);
-  void append(const char *Format, ...) FORMAT(2, 3);
-  void output() const { outputRaw(String.data()); }
-  void reserve(size_t Size) { String.reserve(Size + 1); }
-
-private:
-  Vector<char> String;
-};
-
-int formatString(char *Buffer, uptr BufferLength, const char *Format, ...)
-    FORMAT(3, 4);
-void Printf(const char *Format, ...) FORMAT(1, 2);
-
-} // namespace scudo
-
-#endif // SCUDO_STRING_UTILS_H_
diff --git a/Telegram/ThirdParty/scudo/tests/CMakeLists.txt b/Telegram/ThirdParty/scudo/tests/CMakeLists.txt
deleted file mode 100644
index c6b6a1cb5..000000000
--- a/Telegram/ThirdParty/scudo/tests/CMakeLists.txt
+++ /dev/null
@@ -1,144 +0,0 @@
-include_directories(..)
-
-add_custom_target(ScudoUnitTests)
-set_target_properties(ScudoUnitTests PROPERTIES
-  FOLDER "Compiler-RT Tests")
-
-set(SCUDO_UNITTEST_CFLAGS
-  ${COMPILER_RT_UNITTEST_CFLAGS}
-  ${COMPILER_RT_GTEST_CFLAGS}
-  ${SANITIZER_TEST_CXX_CFLAGS}
-  -I${COMPILER_RT_SOURCE_DIR}/include
-  -I${COMPILER_RT_SOURCE_DIR}/lib
-  -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone
-  -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone/include
-  -DGTEST_HAS_RTTI=0
-  -g
-  # Extra flags for the C++ tests
-  -Wconversion
-  # TODO(kostyak): find a way to make -fsized-deallocation work
-  -Wno-mismatched-new-delete)
-
-if(COMPILER_RT_DEBUG)
-  list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_DEBUG=1 -DSCUDO_ENABLE_HOOKS=1)
-  if (NOT FUCHSIA)
-    list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_ENABLE_HOOKS_TESTS=1)
-  endif()
-endif()
-
-if(ANDROID)
-  list(APPEND SCUDO_UNITTEST_CFLAGS -fno-emulated-tls)
-endif()
-
-if (COMPILER_RT_HAS_GWP_ASAN)
-  list(APPEND SCUDO_UNITTEST_CFLAGS -DGWP_ASAN_HOOKS -fno-omit-frame-pointer
-       -mno-omit-leaf-frame-pointer)
-endif()
-
-append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
-  SCUDO_UNITTEST_CFLAGS)
-
-set(SCUDO_TEST_ARCH ${SCUDO_STANDALONE_SUPPORTED_ARCH})
-
-# gtest requires C++
-set(SCUDO_UNITTEST_LINK_FLAGS
-  ${COMPILER_RT_UNITTEST_LINK_FLAGS}
-  ${COMPILER_RT_UNWINDER_LINK_LIBS}
-  ${SANITIZER_TEST_CXX_LIBRARIES})
-list(APPEND SCUDO_UNITTEST_LINK_FLAGS -pthread -no-pie)
-# Linking against libatomic is required with some compilers
-check_library_exists(atomic __atomic_load_8 "" COMPILER_RT_HAS_LIBATOMIC)
-if (COMPILER_RT_HAS_LIBATOMIC)
-  list(APPEND SCUDO_UNITTEST_LINK_FLAGS -latomic)
-endif()
-
-set(SCUDO_TEST_HEADERS
-  scudo_unit_test.h
-  )
-foreach (header ${SCUDO_HEADERS})
-  list(APPEND SCUDO_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
-endforeach()
-
-macro(add_scudo_unittest testname)
-  cmake_parse_arguments(TEST "" "" "SOURCES;ADDITIONAL_RTOBJECTS" ${ARGN})
-  if (COMPILER_RT_HAS_GWP_ASAN)
-    list(APPEND TEST_ADDITIONAL_RTOBJECTS
-         RTGwpAsan RTGwpAsanBacktraceLibc RTGwpAsanSegvHandler)
-  endif()
-
-  if(COMPILER_RT_HAS_SCUDO_STANDALONE)
-    foreach(arch ${SCUDO_TEST_ARCH})
-      # Additional runtime objects get added alongside RTScudoStandalone
-      set(SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
-      foreach(rtobject ${TEST_ADDITIONAL_RTOBJECTS})
-        list(APPEND SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:${rtobject}.${arch}>)
-      endforeach()
-      # Add the static runtime library made of all the runtime objects
-      set(RUNTIME RT${testname}.${arch})
-      add_library(${RUNTIME} STATIC ${SCUDO_TEST_RTOBJECTS})
-      set(ScudoUnitTestsObjects)
-      generate_compiler_rt_tests(ScudoUnitTestsObjects ScudoUnitTests
-        "${testname}-${arch}-Test" ${arch}
-        SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
-        COMPILE_DEPS ${SCUDO_TEST_HEADERS}
-        DEPS llvm_gtest scudo_standalone
-        RUNTIME ${RUNTIME}
-        CFLAGS ${SCUDO_UNITTEST_CFLAGS}
-        LINK_FLAGS ${SCUDO_UNITTEST_LINK_FLAGS})
-    endforeach()
-  endif()
-endmacro()
-
-set(SCUDO_UNIT_TEST_SOURCES
-  atomic_test.cpp
-  bytemap_test.cpp
-  checksum_test.cpp
-  chunk_test.cpp
-  combined_test.cpp
-  common_test.cpp
-  condition_variable_test.cpp
-  flags_test.cpp
-  list_test.cpp
-  map_test.cpp
-  memtag_test.cpp
-  mutex_test.cpp
-  primary_test.cpp
-  quarantine_test.cpp
-  release_test.cpp
-  report_test.cpp
-  secondary_test.cpp
-  size_class_map_test.cpp
-  stats_test.cpp
-  strings_test.cpp
-  timing_test.cpp
-  tsd_test.cpp
-  vector_test.cpp
-  scudo_unit_test_main.cpp
-  )
-
-# Temporary hack until LLVM libc supports inttypes.h print format macros
-# See: https://github.com/llvm/llvm-project/issues/63317#issuecomment-1591906241
-if(LLVM_LIBC_INCLUDE_SCUDO)
-  list(REMOVE_ITEM SCUDO_UNIT_TEST_SOURCES timing_test.cpp)
-endif()
-
-add_scudo_unittest(ScudoUnitTest
-  SOURCES ${SCUDO_UNIT_TEST_SOURCES})
-
-set(SCUDO_C_UNIT_TEST_SOURCES
-  wrappers_c_test.cpp
-  scudo_unit_test_main.cpp
-  )
-
-add_scudo_unittest(ScudoCUnitTest
-  SOURCES ${SCUDO_C_UNIT_TEST_SOURCES}
-  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers)
-
-set(SCUDO_CXX_UNIT_TEST_SOURCES
-  wrappers_cpp_test.cpp
-  scudo_unit_test_main.cpp
-  )
-
-add_scudo_unittest(ScudoCxxUnitTest
-  SOURCES ${SCUDO_CXX_UNIT_TEST_SOURCES}
-  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers RTScudoStandaloneCxxWrappers)
diff --git a/Telegram/ThirdParty/scudo/tests/atomic_test.cpp b/Telegram/ThirdParty/scudo/tests/atomic_test.cpp
deleted file mode 100644
index e90a642fd..000000000
--- a/Telegram/ThirdParty/scudo/tests/atomic_test.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-//===-- atomic_test.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "atomic_helpers.h"
-
-namespace scudo {
-
-template <typename T> struct ValAndMagic {
-  typename T::Type Magic0;
-  T A;
-  typename T::Type Magic1;
-
-  static ValAndMagic<T> *Sink;
-};
-
-template <typename T> ValAndMagic<T> *ValAndMagic<T>::Sink;
-
-template <typename T, memory_order LoadMO, memory_order StoreMO>
-void checkStoreLoad() {
-  typedef typename T::Type Type;
-  ValAndMagic<T> Val;
-  // Prevent the compiler from scalarizing the struct.
-  ValAndMagic<T>::Sink = &Val;
-  // Ensure that surrounding memory is not overwritten.
-  Val.Magic0 = Val.Magic1 = (Type)-3;
-  for (u64 I = 0; I < 100; I++) {
-    // Generate a value that occupies all bytes of the variable.
-    u64 V = I;
-    V |= V << 8;
-    V |= V << 16;
-    V |= V << 32;
-    Val.A.ValDoNotUse = (Type)V;
-    EXPECT_EQ(atomic_load(&Val.A, LoadMO), (Type)V);
-    Val.A.ValDoNotUse = (Type)-1;
-    atomic_store(&Val.A, (Type)V, StoreMO);
-    EXPECT_EQ(Val.A.ValDoNotUse, (Type)V);
-  }
-  EXPECT_EQ(Val.Magic0, (Type)-3);
-  EXPECT_EQ(Val.Magic1, (Type)-3);
-}
-
-TEST(ScudoAtomicTest, AtomicStoreLoad) {
-  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_relaxed>();
-  checkStoreLoad<atomic_u8, memory_order_consume, memory_order_relaxed>();
-  checkStoreLoad<atomic_u8, memory_order_acquire, memory_order_relaxed>();
-  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_release>();
-  checkStoreLoad<atomic_u8, memory_order_seq_cst, memory_order_seq_cst>();
-
-  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_relaxed>();
-  checkStoreLoad<atomic_u16, memory_order_consume, memory_order_relaxed>();
-  checkStoreLoad<atomic_u16, memory_order_acquire, memory_order_relaxed>();
-  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_release>();
-  checkStoreLoad<atomic_u16, memory_order_seq_cst, memory_order_seq_cst>();
-
-  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_relaxed>();
-  checkStoreLoad<atomic_u32, memory_order_consume, memory_order_relaxed>();
-  checkStoreLoad<atomic_u32, memory_order_acquire, memory_order_relaxed>();
-  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_release>();
-  checkStoreLoad<atomic_u32, memory_order_seq_cst, memory_order_seq_cst>();
-
-  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_relaxed>();
-  checkStoreLoad<atomic_u64, memory_order_consume, memory_order_relaxed>();
-  checkStoreLoad<atomic_u64, memory_order_acquire, memory_order_relaxed>();
-  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_release>();
-  checkStoreLoad<atomic_u64, memory_order_seq_cst, memory_order_seq_cst>();
-
-  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_relaxed>();
-  checkStoreLoad<atomic_uptr, memory_order_consume, memory_order_relaxed>();
-  checkStoreLoad<atomic_uptr, memory_order_acquire, memory_order_relaxed>();
-  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_release>();
-  checkStoreLoad<atomic_uptr, memory_order_seq_cst, memory_order_seq_cst>();
-}
-
-template <typename T> void checkAtomicCompareExchange() {
-  typedef typename T::Type Type;
-  Type OldVal = 42;
-  Type NewVal = 24;
-  Type V = OldVal;
-  EXPECT_TRUE(atomic_compare_exchange_strong(reinterpret_cast<T *>(&V), &OldVal,
-                                             NewVal, memory_order_relaxed));
-  EXPECT_FALSE(atomic_compare_exchange_strong(
-      reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
-  EXPECT_EQ(NewVal, OldVal);
-}
-
-TEST(ScudoAtomicTest, AtomicCompareExchangeTest) {
-  checkAtomicCompareExchange<atomic_u8>();
-  checkAtomicCompareExchange<atomic_u16>();
-  checkAtomicCompareExchange<atomic_u32>();
-  checkAtomicCompareExchange<atomic_u64>();
-  checkAtomicCompareExchange<atomic_uptr>();
-}
-
-} // namespace scudo
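Annotation: the EXPECT_EQ(NewVal, OldVal) at the end of
checkAtomicCompareExchange passes because a failed compare-exchange writes the
observed value back into the expected argument. The same semantics in a
standalone sketch with std::atomic:

#include <atomic>
#include <cassert>

int main() {
  std::atomic<unsigned> V{42};
  unsigned Expected = 42;
  // First exchange succeeds: V matched Expected, so V becomes 24 and
  // Expected is left untouched.
  assert(V.compare_exchange_strong(Expected, 24));
  // Second exchange fails: V is now 24, not 42. On failure the observed
  // value is written back into Expected, which is why the test above sees
  // OldVal == NewVal afterwards.
  assert(!V.compare_exchange_strong(Expected, 24));
  assert(Expected == 24);
}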
diff --git a/Telegram/ThirdParty/scudo/tests/bytemap_test.cpp b/Telegram/ThirdParty/scudo/tests/bytemap_test.cpp
deleted file mode 100644
index 4034b108f..000000000
--- a/Telegram/ThirdParty/scudo/tests/bytemap_test.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//===-- bytemap_test.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "bytemap.h"
-
-#include <pthread.h>
-#include <string.h>
-
-template <typename T> void testMap(T &Map, scudo::uptr Size) {
-  Map.init();
-  for (scudo::uptr I = 0; I < Size; I += 7)
-    Map.set(I, (I % 100) + 1);
-  for (scudo::uptr J = 0; J < Size; J++) {
-    if (J % 7)
-      EXPECT_EQ(Map[J], 0);
-    else
-      EXPECT_EQ(Map[J], (J % 100) + 1);
-  }
-}
-
-TEST(ScudoByteMapTest, FlatByteMap) {
-  const scudo::uptr Size = 1U << 10;
-  scudo::FlatByteMap<Size> Map;
-  testMap(Map, Size);
-  Map.unmapTestOnly();
-}
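Annotation: FlatByteMap, as exercised above, is conceptually a fixed-size byte
array with set() and operator[]. A toy standalone analogue (simplified; the
real class also provides init() and unmapTestOnly() for its backing storage):

#include <cassert>
#include <cstddef>
#include <cstdint>

template <size_t Size> class ToyFlatByteMap {
public:
  void init() {} // nothing to do for a statically-sized toy version
  void set(size_t Index, uint8_t Value) {
    assert(Index < Size);
    Map[Index] = Value;
  }
  uint8_t operator[](size_t Index) const {
    assert(Index < Size);
    return Map[Index];
  }

private:
  uint8_t Map[Size] = {};
};

int main() {
  ToyFlatByteMap<1024> Map;
  Map.init();
  Map.set(7, 42);
  assert(Map[7] == 42 && Map[8] == 0);
}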
diff --git a/Telegram/ThirdParty/scudo/tests/checksum_test.cpp b/Telegram/ThirdParty/scudo/tests/checksum_test.cpp
deleted file mode 100644
index c5d5b7379..000000000
--- a/Telegram/ThirdParty/scudo/tests/checksum_test.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-//===-- checksum_test.cpp ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "checksum.h"
-
-#include <string.h>
-
-static scudo::u16 computeSoftwareChecksum(scudo::u32 Seed, scudo::uptr *Array,
-                                          scudo::uptr ArraySize) {
-  scudo::u16 Checksum = static_cast<scudo::u16>(Seed & 0xffff);
-  for (scudo::uptr I = 0; I < ArraySize; I++)
-    Checksum = scudo::computeBSDChecksum(Checksum, Array[I]);
-  return Checksum;
-}
-
-static scudo::u16 computeHardwareChecksum(scudo::u32 Seed, scudo::uptr *Array,
-                                          scudo::uptr ArraySize) {
-  scudo::u32 Crc = Seed;
-  for (scudo::uptr I = 0; I < ArraySize; I++)
-    Crc = scudo::computeHardwareCRC32(Crc, Array[I]);
-  return static_cast<scudo::u16>((Crc & 0xffff) ^ (Crc >> 16));
-}
-
-typedef scudo::u16 (*ComputeChecksum)(scudo::u32, scudo::uptr *, scudo::uptr);
-
-// This verifies that flipping bits in the data being checksummed produces a
-// different checksum. We do not use random data to avoid flakiness.
-template <ComputeChecksum F> static void verifyChecksumFunctionBitFlip() {
-  scudo::uptr Array[sizeof(scudo::u64) / sizeof(scudo::uptr)];
-  const scudo::uptr ArraySize = ARRAY_SIZE(Array);
-  memset(Array, 0xaa, sizeof(Array));
-  const scudo::u32 Seed = 0x41424343U;
-  const scudo::u16 Reference = F(Seed, Array, ArraySize);
-  scudo::u8 IdenticalChecksums = 0;
-  for (scudo::uptr I = 0; I < ArraySize; I++) {
-    for (scudo::uptr J = 0; J < SCUDO_WORDSIZE; J++) {
-      Array[I] ^= scudo::uptr{1} << J;
-      if (F(Seed, Array, ArraySize) == Reference)
-        IdenticalChecksums++;
-      Array[I] ^= scudo::uptr{1} << J;
-    }
-  }
-  // Allow for a couple of identical checksums over the whole set of flips.
-  EXPECT_LE(IdenticalChecksums, 2);
-}
-
-TEST(ScudoChecksumTest, ChecksumFunctions) {
-  verifyChecksumFunctionBitFlip<computeSoftwareChecksum>();
-  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32())
-    verifyChecksumFunctionBitFlip<computeHardwareChecksum>();
-}
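Annotation: the bit-flip sensitivity verified above can be reproduced with the
classic byte-oriented BSD checksum (rotate right by one, then add). This is a
simplified stand-in for illustration, not Scudo's word-oriented
computeBSDChecksum:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Classic byte-oriented BSD checksum: rotate the 16-bit sum right by one
// position, then add the next byte.
static uint16_t bsdChecksum(const uint8_t *Data, size_t Len) {
  uint16_t Sum = 0;
  for (size_t I = 0; I < Len; I++) {
    Sum = static_cast<uint16_t>((Sum >> 1) | (Sum << 15)); // rotate right
    Sum = static_cast<uint16_t>(Sum + Data[I]);
  }
  return Sum;
}

int main() {
  uint8_t Data[8];
  std::memset(Data, 0xaa, sizeof(Data));
  const uint16_t Reference = bsdChecksum(Data, sizeof(Data));
  Data[3] ^= 0x01; // flip a single bit, as the test above does per position
  std::printf("reference=0x%04x flipped=0x%04x\n", Reference,
              bsdChecksum(Data, sizeof(Data)));
}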
diff --git a/Telegram/ThirdParty/scudo/tests/chunk_test.cpp b/Telegram/ThirdParty/scudo/tests/chunk_test.cpp
deleted file mode 100644
index 1b2c1eb5a..000000000
--- a/Telegram/ThirdParty/scudo/tests/chunk_test.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-//===-- chunk_test.cpp ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "chunk.h"
-
-#include <stdlib.h>
-
-static constexpr scudo::uptr HeaderSize = scudo::Chunk::getHeaderSize();
-static constexpr scudo::u32 Cookie = 0x41424344U;
-static constexpr scudo::u32 InvalidCookie = 0x11223344U;
-
-static void initChecksum(void) {
-  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32())
-    scudo::HashAlgorithm = scudo::Checksum::HardwareCRC32;
-}
-
-TEST(ScudoChunkDeathTest, ChunkBasic) {
-  initChecksum();
-  const scudo::uptr Size = 0x100U;
-  scudo::Chunk::UnpackedHeader Header = {};
-  void *Block = malloc(HeaderSize + Size);
-  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
-                                     HeaderSize);
-  scudo::Chunk::storeHeader(Cookie, P, &Header);
-  memset(P, 'A', Size);
-  scudo::Chunk::loadHeader(Cookie, P, &Header);
-  EXPECT_TRUE(scudo::Chunk::isValid(Cookie, P, &Header));
-  EXPECT_FALSE(scudo::Chunk::isValid(InvalidCookie, P, &Header));
-  EXPECT_DEATH(scudo::Chunk::loadHeader(InvalidCookie, P, &Header), "");
-  free(Block);
-}
-
-TEST(ScudoChunkDeathTest, CorruptHeader) {
-  initChecksum();
-  const scudo::uptr Size = 0x100U;
-  scudo::Chunk::UnpackedHeader Header = {};
-  void *Block = malloc(HeaderSize + Size);
-  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
-                                     HeaderSize);
-  scudo::Chunk::storeHeader(Cookie, P, &Header);
-  memset(P, 'A', Size);
-  scudo::Chunk::loadHeader(Cookie, P, &Header);
-  // Simulate a couple of corrupted bits per byte of header data.
-  for (scudo::uptr I = 0; I < sizeof(scudo::Chunk::PackedHeader); I++) {
-    *(reinterpret_cast<scudo::u8 *>(Block) + I) ^= 0x42U;
-    EXPECT_DEATH(scudo::Chunk::loadHeader(Cookie, P, &Header), "");
-    *(reinterpret_cast<scudo::u8 *>(Block) + I) ^= 0x42U;
-  }
-  free(Block);
-}
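Annotation: the CorruptHeader test relies on the chunk header carrying a
checksum over its own contents, so flipping any header bit invalidates it. A
toy illustration of the idea; the layout and checksum below are invented for
the example and differ from Scudo's real packed header:

#include <cstdint>
#include <cstdio>

struct ToyHeader {
  uint16_t SizeClass;
  uint8_t State;
  uint8_t Checksum; // covers the two fields above
};

static uint8_t checksumOf(const ToyHeader &H) {
  // XOR-fold every field except the checksum byte itself.
  return static_cast<uint8_t>((H.SizeClass >> 8) ^ (H.SizeClass & 0xff) ^
                              H.State);
}

static bool isValid(const ToyHeader &H) { return H.Checksum == checksumOf(H); }

int main() {
  ToyHeader H = {0x100, 1, 0};
  H.Checksum = checksumOf(H);
  std::printf("valid=%d\n", isValid(H)); // 1
  // Flip a couple of bits in the raw header bytes, as CorruptHeader does.
  reinterpret_cast<uint8_t *>(&H)[0] ^= 0x42;
  std::printf("valid after corruption=%d\n", isValid(H)); // 0
}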
diff --git a/Telegram/ThirdParty/scudo/tests/combined_test.cpp b/Telegram/ThirdParty/scudo/tests/combined_test.cpp
deleted file mode 100644
index 3dbd93cac..000000000
--- a/Telegram/ThirdParty/scudo/tests/combined_test.cpp
+++ /dev/null
@@ -1,903 +0,0 @@
-//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "memtag.h"
-#include "tests/scudo_unit_test.h"
-
-#include "allocator_config.h"
-#include "chunk.h"
-#include "combined.h"
-#include "condition_variable.h"
-#include "mem_map.h"
-#include "size_class_map.h"
-
-#include <algorithm>
-#include <condition_variable>
-#include <memory>
-#include <mutex>
-#include <set>
-#include <stdlib.h>
-#include <thread>
-#include <vector>
-
-static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
-static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);
-
-// Fuchsia complains that the function is not used.
-UNUSED static void disableDebuggerdMaybe() {
-#if SCUDO_ANDROID
-  // Disable the debuggerd signal handler on Android; without this, we can end
-  // up spending a significant amount of time creating tombstones.
-  signal(SIGSEGV, SIG_DFL);
-#endif
-}
-
-template <class AllocatorT>
-bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
-  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
-  if (Alignment < MinAlignment)
-    Alignment = MinAlignment;
-  const scudo::uptr NeededSize =
-      scudo::roundUp(Size, MinAlignment) +
-      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
-  return AllocatorT::PrimaryT::canAllocate(NeededSize);
-}
-
-template <class AllocatorT>
-void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
-                             scudo::uptr Alignment) {
-  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
-  Size = scudo::roundUp(Size, MinAlignment);
-  if (Allocator->useMemoryTaggingTestOnly())
-    EXPECT_DEATH(
-        {
-          disableDebuggerdMaybe();
-          reinterpret_cast<char *>(P)[-1] = 'A';
-        },
-        "");
-  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
-          ? Allocator->useMemoryTaggingTestOnly()
-          : Alignment == MinAlignment) {
-    EXPECT_DEATH(
-        {
-          disableDebuggerdMaybe();
-          reinterpret_cast<char *>(P)[Size] = 'A';
-        },
-        "");
-  }
-}
-
-template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
-  TestAllocator() {
-    this->initThreadMaybe();
-    if (scudo::archSupportsMemoryTagging() &&
-        !scudo::systemDetectsMemoryTagFaultsTestOnly())
-      this->disableMemoryTagging();
-  }
-  ~TestAllocator() { this->unmapTestOnly(); }
-
-  void *operator new(size_t size);
-  void operator delete(void *ptr);
-};
-
-constexpr size_t kMaxAlign = std::max({
-  alignof(scudo::Allocator<scudo::DefaultConfig>),
-#if SCUDO_CAN_USE_PRIMARY64
-      alignof(scudo::Allocator<scudo::FuchsiaConfig>),
-#endif
-      alignof(scudo::Allocator<scudo::AndroidConfig>)
-});
-
-#if SCUDO_RISCV64
-// The allocator is over 4MB in size. Rather than creating an instance of it on
-// the heap, keep it in global storage to reduce the fragmentation caused by
-// mmap'ing it at the start of every test.
-struct TestAllocatorStorage {
-  static constexpr size_t kMaxSize = std::max({
-    sizeof(scudo::Allocator<scudo::DefaultConfig>),
-#if SCUDO_CAN_USE_PRIMARY64
-        sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
-#endif
-        sizeof(scudo::Allocator<scudo::AndroidConfig>)
-  });
-
-  // The lock is acquired in get() and released in release(), which the thread
-  // safety analysis cannot follow, so skip the analysis here.
-  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
-    CHECK(size <= kMaxSize &&
-          "Allocation size doesn't fit in the allocator storage");
-    M.lock();
-    return AllocatorStorage;
-  }
-
-  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
-    M.assertHeld();
-    M.unlock();
-    ASSERT_EQ(ptr, AllocatorStorage);
-  }
-
-  static scudo::HybridMutex M;
-  static uint8_t AllocatorStorage[kMaxSize];
-};
-scudo::HybridMutex TestAllocatorStorage::M;
-alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
-#else
-struct TestAllocatorStorage {
-  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
-    void *p = nullptr;
-    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
-    return p;
-  }
-  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
-};
-#endif
-
-template <typename Config>
-void *TestAllocator<Config>::operator new(size_t size) {
-  return TestAllocatorStorage::get(size);
-}
-
-template <typename Config>
-void TestAllocator<Config>::operator delete(void *ptr) {
-  TestAllocatorStorage::release(ptr);
-}
-
-template <class TypeParam> struct ScudoCombinedTest : public Test {
-  ScudoCombinedTest() {
-    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
-    Allocator = std::make_unique<AllocatorT>();
-  }
-  ~ScudoCombinedTest() {
-    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-    UseQuarantine = true;
-  }
-
-  void RunTest();
-
-  void BasicTest(scudo::uptr SizeLog);
-
-  using AllocatorT = TestAllocator<TypeParam>;
-  std::unique_ptr<AllocatorT> Allocator;
-};
-
-template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;
-
-namespace scudo {
-struct TestConditionVariableConfig {
-  static const bool MaySupportMemoryTagging = true;
-  template <class A>
-  using TSDRegistryT =
-      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
-
-  struct Primary {
-    using SizeClassMap = scudo::AndroidSizeClassMap;
-#if SCUDO_CAN_USE_PRIMARY64
-    static const scudo::uptr RegionSizeLog = 28U;
-    typedef scudo::u32 CompactPtrT;
-    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-    static const scudo::uptr GroupSizeLog = 20U;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-#else
-    static const scudo::uptr RegionSizeLog = 18U;
-    static const scudo::uptr GroupSizeLog = 18U;
-    typedef scudo::uptr CompactPtrT;
-#endif
-    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
-    static const bool UseConditionVariable = true;
-#if SCUDO_LINUX
-    using ConditionVariableT = scudo::ConditionVariableLinux;
-#else
-    using ConditionVariableT = scudo::ConditionVariableDummy;
-#endif
-  };
-#if SCUDO_CAN_USE_PRIMARY64
-  template <typename Config>
-  using PrimaryT = scudo::SizeClassAllocator64<Config>;
-#else
-  template <typename Config>
-  using PrimaryT = scudo::SizeClassAllocator32<Config>;
-#endif
-
-  struct Secondary {
-    template <typename Config>
-    using CacheT = scudo::MapAllocatorNoCache<Config>;
-  };
-  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
-};
-} // namespace scudo
-
-#if SCUDO_FUCHSIA
-#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
-#else
-#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
-#endif
-
-#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
-  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
-  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }
-
-#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
-  template <class TypeParam>                                                   \
-  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
-    void Run();                                                                \
-  };                                                                           \
-  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
-  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
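// Annotation (not part of the original source): SCUDO_TYPED_TEST(FIXTURE, NAME)
// expands, for each config TYPE, to roughly:
//   using FIXTURENAME_TYPE = FIXTURENAME<scudo::TYPE>;
//   TEST_F(FIXTURENAME_TYPE, NAME) { FIXTURENAME<scudo::TYPE>::Run(); }
// e.g. SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) below produces
// ScudoCombinedTestIsOwned_DefaultConfig and friends, with the test body that
// follows the macro becoming the definition of Run().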
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
-  auto *Allocator = this->Allocator.get();
-  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
-  EXPECT_FALSE(
-      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));
-
-  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
-  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
-    StackBuffer[I] = 0x42U;
-  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
-  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
-    EXPECT_EQ(StackBuffer[I], 0x42U);
-}
-
-template <class Config>
-void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
-  auto *Allocator = this->Allocator.get();
-
-  // This allocates and deallocates a bunch of chunks, with a wide range of
-  // sizes and alignments, with a focus on sizes that could trigger weird
-  // behaviors (for example, a power of two plus or minus a small delta).
-  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
-    const scudo::uptr Align = 1U << AlignLog;
-    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
-      if ((1LL << SizeLog) + Delta < 0)
-        continue;
-      const scudo::uptr Size =
-          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
-      void *P = Allocator->allocate(Size, Origin, Align);
-      EXPECT_NE(P, nullptr);
-      EXPECT_TRUE(Allocator->isOwned(P));
-      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
-      EXPECT_LE(Size, Allocator->getUsableSize(P));
-      memset(P, 0xaa, Size);
-      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
-      Allocator->deallocate(P, Origin, Size);
-    }
-  }
-
-  Allocator->printStats();
-  Allocator->printFragmentationInfo();
-}
-
-#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
-  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
-    this->BasicTest(SizeLog);                                                  \
-  }
-
-SCUDO_MAKE_BASIC_TEST(0)
-SCUDO_MAKE_BASIC_TEST(1)
-SCUDO_MAKE_BASIC_TEST(2)
-SCUDO_MAKE_BASIC_TEST(3)
-SCUDO_MAKE_BASIC_TEST(4)
-SCUDO_MAKE_BASIC_TEST(5)
-SCUDO_MAKE_BASIC_TEST(6)
-SCUDO_MAKE_BASIC_TEST(7)
-SCUDO_MAKE_BASIC_TEST(8)
-SCUDO_MAKE_BASIC_TEST(9)
-SCUDO_MAKE_BASIC_TEST(10)
-SCUDO_MAKE_BASIC_TEST(11)
-SCUDO_MAKE_BASIC_TEST(12)
-SCUDO_MAKE_BASIC_TEST(13)
-SCUDO_MAKE_BASIC_TEST(14)
-SCUDO_MAKE_BASIC_TEST(15)
-SCUDO_MAKE_BASIC_TEST(16)
-SCUDO_MAKE_BASIC_TEST(17)
-SCUDO_MAKE_BASIC_TEST(18)
-SCUDO_MAKE_BASIC_TEST(19)
-SCUDO_MAKE_BASIC_TEST(20)
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
-  auto *Allocator = this->Allocator.get();
-
-  // Ensure that specifying ZeroContents returns a zeroed-out block.
-  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
-    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
-      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
-      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
-      EXPECT_NE(P, nullptr);
-      for (scudo::uptr I = 0; I < Size; I++)
-        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
-      memset(P, 0xaa, Size);
-      Allocator->deallocate(P, Origin, Size);
-    }
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
-  auto *Allocator = this->Allocator.get();
-
-  // Ensure that specifying ZeroFill returns a zeroed-out block.
-  Allocator->setFillContents(scudo::ZeroFill);
-  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
-    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
-      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
-      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
-      EXPECT_NE(P, nullptr);
-      for (scudo::uptr I = 0; I < Size; I++)
-        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
-      memset(P, 0xaa, Size);
-      Allocator->deallocate(P, Origin, Size);
-    }
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
-  auto *Allocator = this->Allocator.get();
-
-  // Ensure that specifying PatternOrZeroFill returns a pattern- or zero-filled
-  // block. The primary allocator only produces pattern-filled blocks if MTE
-  // is disabled, so we only require pattern-filled blocks in that case.
-  Allocator->setFillContents(scudo::PatternOrZeroFill);
-  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
-    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
-      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
-      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
-      EXPECT_NE(P, nullptr);
-      for (scudo::uptr I = 0; I < Size; I++) {
-        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
-        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
-                                                          1U << MinAlignLog) &&
-            !Allocator->useMemoryTaggingTestOnly())
-          ASSERT_EQ(V, scudo::PatternFillByte);
-        else
-          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
-      }
-      memset(P, 0xaa, Size);
-      Allocator->deallocate(P, Origin, Size);
-    }
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
-  auto *Allocator = this->Allocator.get();
-
-  // Verify that a chunk will end up being reused at some point.
-  const scudo::uptr NeedleSize = 1024U;
-  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
-  Allocator->deallocate(NeedleP, Origin);
-  bool Found = false;
-  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
-    void *P = Allocator->allocate(NeedleSize, Origin);
-    if (Allocator->getHeaderTaggedPointer(P) ==
-        Allocator->getHeaderTaggedPointer(NeedleP))
-      Found = true;
-    Allocator->deallocate(P, Origin);
-  }
-  EXPECT_TRUE(Found);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
-  auto *Allocator = this->Allocator.get();
-
-  // Reallocate a chunk all the way up to a secondary allocation, verifying that
-  // we preserve the data in the process.
-  scudo::uptr Size = 16;
-  void *P = Allocator->allocate(Size, Origin);
-  const char Marker = 'A';
-  memset(P, Marker, Size);
-  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
-    void *NewP = Allocator->reallocate(P, Size * 2);
-    EXPECT_NE(NewP, nullptr);
-    for (scudo::uptr J = 0; J < Size; J++)
-      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
-    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
-    Size *= 2U;
-    P = NewP;
-  }
-  Allocator->deallocate(P, Origin);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
-  auto *Allocator = this->Allocator.get();
-
-  // Reallocate a large chunk all the way down to a byte, verifying that we
-  // preserve the data in the process.
-  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
-  const scudo::uptr DataSize = 2048U;
-  void *P = Allocator->allocate(Size, Origin);
-  const char Marker = 'A';
-  memset(P, Marker, scudo::Min(Size, DataSize));
-  while (Size > 1U) {
-    Size /= 2U;
-    void *NewP = Allocator->reallocate(P, Size);
-    EXPECT_NE(NewP, nullptr);
-    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
-      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
-    P = NewP;
-  }
-  Allocator->deallocate(P, Origin);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
-  auto *Allocator = this->Allocator.get();
-
-  // Check that reallocating a chunk to a slightly smaller or larger size
-  // returns the same chunk. This requires that all the sizes we iterate on use
-  // the same block size, but that should be the case for MaxSize - 64 with our
-  // default size class maps.
-  constexpr scudo::uptr ReallocSize =
-      TypeParam::Primary::SizeClassMap::MaxSize - 64;
-  void *P = Allocator->allocate(ReallocSize, Origin);
-  const char Marker = 'A';
-  memset(P, Marker, ReallocSize);
-  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
-    const scudo::uptr NewSize =
-        static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
-    void *NewP = Allocator->reallocate(P, NewSize);
-    EXPECT_EQ(NewP, P);
-    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
-      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
-    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
-  }
-  Allocator->deallocate(P, Origin);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
-  auto *Allocator = this->Allocator.get();
-  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
-  // are the ones we allocated. This requires the allocator to not have any
-  // other allocated chunk at this point (e.g., it won't work with the
-  // Quarantine). FIXME: Make it work with UseQuarantine and tagging enabled.
-  // The internals of iterateOverChunks read headers through both tagged and
-  // non-tagged pointers, so one of them will fail.
-  if (!UseQuarantine) {
-    std::vector<void *> V;
-    for (scudo::uptr I = 0; I < 64U; I++)
-      V.push_back(Allocator->allocate(
-          static_cast<scudo::uptr>(std::rand()) %
-              (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
-          Origin));
-    Allocator->disable();
-    Allocator->iterateOverChunks(
-        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
-        [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
-          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
-          void *P = reinterpret_cast<void *>(Base);
-          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
-        },
-        reinterpret_cast<void *>(&V));
-    Allocator->enable();
-    for (auto P : V)
-      Allocator->deallocate(P, Origin);
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
-  auto *Allocator = this->Allocator.get();
-
-  // Check that use-after-free is detected.
-  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
-    const scudo::uptr Size = 1U << SizeLog;
-    if (!Allocator->useMemoryTaggingTestOnly())
-      continue;
-    EXPECT_DEATH(
-        {
-          disableDebuggerdMaybe();
-          void *P = Allocator->allocate(Size, Origin);
-          Allocator->deallocate(P, Origin);
-          reinterpret_cast<char *>(P)[0] = 'A';
-        },
-        "");
-    EXPECT_DEATH(
-        {
-          disableDebuggerdMaybe();
-          void *P = Allocator->allocate(Size, Origin);
-          Allocator->deallocate(P, Origin);
-          reinterpret_cast<char *>(P)[Size - 1] = 'A';
-        },
-        "");
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
-  auto *Allocator = this->Allocator.get();
-
-  if (Allocator->useMemoryTaggingTestOnly()) {
-    // Check that disabling memory tagging works correctly.
-    void *P = Allocator->allocate(2048, Origin);
-    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
-    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
-    Allocator->disableMemoryTagging();
-    reinterpret_cast<char *>(P)[2048] = 'A';
-    Allocator->deallocate(P, Origin);
-
-    P = Allocator->allocate(2048, Origin);
-    EXPECT_EQ(scudo::untagPointer(P), P);
-    reinterpret_cast<char *>(P)[2048] = 'A';
-    Allocator->deallocate(P, Origin);
-
-    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
-  auto *Allocator = this->Allocator.get();
-
-  scudo::uptr BufferSize = 8192;
-  std::vector<char> Buffer(BufferSize);
-  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
-  while (ActualSize > BufferSize) {
-    BufferSize = ActualSize + 1024;
-    Buffer.resize(BufferSize);
-    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
-  }
-  std::string Stats(Buffer.begin(), Buffer.end());
-  // Basic checks on the contents of the statistics output, which also allow us
-  // to verify that we got it all.
-  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
-  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
-  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
-  auto *Allocator = this->Allocator.get();
-
-  std::vector<void *> V;
-  for (scudo::uptr I = 0; I < 64U; I++)
-    V.push_back(Allocator->allocate(
-        static_cast<scudo::uptr>(std::rand()) %
-            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
-        Origin));
-  for (auto P : V)
-    Allocator->deallocate(P, Origin);
-
-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_TRUE(!TSD->getCache().isEmpty());
-  TSD->getCache().drain();
-  EXPECT_TRUE(TSD->getCache().isEmpty());
-  if (UnlockRequired)
-    TSD->unlock();
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
-  auto *Allocator = this->Allocator.get();
-
-  std::vector<void *> V;
-  for (scudo::uptr I = 0; I < 64U; I++)
-    V.push_back(Allocator->allocate(
-        static_cast<scudo::uptr>(std::rand()) %
-            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
-        Origin));
-  for (auto P : V)
-    Allocator->deallocate(P, Origin);
-
-  // `ForceAll` will also drain the caches.
-  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);
-
-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_TRUE(TSD->getCache().isEmpty());
-  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
-  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
-  if (UnlockRequired)
-    TSD->unlock();
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
-  std::mutex Mutex;
-  std::condition_variable Cv;
-  bool Ready = false;
-  auto *Allocator = this->Allocator.get();
-  std::thread Threads[32];
-  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
-    Threads[I] = std::thread([&]() {
-      {
-        std::unique_lock<std::mutex> Lock(Mutex);
-        while (!Ready)
-          Cv.wait(Lock);
-      }
-      std::vector<std::pair<void *, scudo::uptr>> V;
-      for (scudo::uptr I = 0; I < 256U; I++) {
-        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
-        void *P = Allocator->allocate(Size, Origin);
-        // A region could have run out of memory, resulting in a null P.
-        if (P)
-          V.push_back(std::make_pair(P, Size));
-      }
-
-      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
-      Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-
-      while (!V.empty()) {
-        auto Pair = V.back();
-        Allocator->deallocate(Pair.first, Origin, Pair.second);
-        V.pop_back();
-      }
-    });
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-}
-
-// Test that multiple instantiations of the allocator have not messed up the
-// process's signal handlers (GWP-ASan used to do this).
-TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
-  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  scudo::ReservedMemoryT ReservedMemory;
-  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
-  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
-  ASSERT_NE(P, nullptr);
-  EXPECT_DEATH(memset(P, 0xaa, Size), "");
-  ReservedMemory.release();
-}
-
-struct DeathSizeClassConfig {
-  static const scudo::uptr NumBits = 1;
-  static const scudo::uptr MinSizeLog = 10;
-  static const scudo::uptr MidSizeLog = 10;
-  static const scudo::uptr MaxSizeLog = 13;
-  static const scudo::u16 MaxNumCachedHint = 8;
-  static const scudo::uptr MaxBytesCachedLog = 12;
-  static const scudo::uptr SizeDelta = 0;
-};
-
-static const scudo::uptr DeathRegionSizeLog = 21U;
-struct DeathConfig {
-  static const bool MaySupportMemoryTagging = false;
-  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
-
-  struct Primary {
-    // Tiny allocator, its Primary only serves chunks of four sizes.
-    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
-    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    typedef scudo::uptr CompactPtrT;
-    static const scudo::uptr CompactPtrScale = 0;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-    static const scudo::uptr GroupSizeLog = 18;
-  };
-  template <typename Config>
-  using PrimaryT = scudo::SizeClassAllocator64<Config>;
-
-  struct Secondary {
-    template <typename Config>
-    using CacheT = scudo::MapAllocatorNoCache<Config>;
-  };
-
-  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
-};
-
-TEST(ScudoCombinedDeathTest, DeathCombined) {
-  using AllocatorT = TestAllocator<DeathConfig>;
-  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
-
-  const scudo::uptr Size = 1000U;
-  void *P = Allocator->allocate(Size, Origin);
-  EXPECT_NE(P, nullptr);
-
-  // Invalid sized deallocation.
-  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");
-
-  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
-  UNUSED void *MisalignedP =
-      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
-  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
-  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");
-
-  // Header corruption.
-  scudo::u64 *H =
-      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
-  *H ^= 0x42U;
-  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
-  *H ^= 0x420042U;
-  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
-  *H ^= 0x420000U;
-
-  // Invalid chunk state.
-  Allocator->deallocate(P, Origin, Size);
-  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
-  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
-  EXPECT_DEATH(Allocator->getUsableSize(P), "");
-}
-
-// Verify that when a region gets full, the allocator will still manage to
-// fulfill the allocation through a larger size class.
-TEST(ScudoCombinedTest, FullRegion) {
-  using AllocatorT = TestAllocator<DeathConfig>;
-  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
-
-  std::vector<void *> V;
-  scudo::uptr FailedAllocationsCount = 0;
-  for (scudo::uptr ClassId = 1U;
-       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
-       ClassId++) {
-    const scudo::uptr Size =
-        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
-    // Allocate enough to fill all of the regions above this one.
-    const scudo::uptr MaxNumberOfChunks =
-        ((1U << DeathRegionSizeLog) / Size) *
-        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
-    void *P;
-    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
-      P = Allocator->allocate(Size - 64U, Origin);
-      if (!P)
-        FailedAllocationsCount++;
-      else
-        V.push_back(P);
-    }
-    while (!V.empty()) {
-      Allocator->deallocate(V.back(), Origin);
-      V.pop_back();
-    }
-  }
-  EXPECT_EQ(FailedAllocationsCount, 0U);
-}
-
-// Ensure that releaseToOS can be called prior to any other allocator
-// operation without issue.
-SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
-  auto *Allocator = this->Allocator.get();
-  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
-  auto *Allocator = this->Allocator.get();
-  Allocator->setOption(scudo::Option::MemtagTuning,
-                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);
-
-  if (!Allocator->useMemoryTaggingTestOnly())
-    return;
-
-  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
-    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
-    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
-    EXPECT_NE(Tag1 % 2, Tag2 % 2);
-  };
-
-  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
-  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
-       ClassId++) {
-    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);
-
-    std::set<scudo::uptr> Ptrs;
-    bool Found = false;
-    for (unsigned I = 0; I != 65536; ++I) {
-      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
-          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
-      if (Ptrs.count(P - Size)) {
-        Found = true;
-        CheckOddEven(P, P - Size);
-        break;
-      }
-      if (Ptrs.count(P + Size)) {
-        Found = true;
-        CheckOddEven(P, P + Size);
-        break;
-      }
-      Ptrs.insert(P);
-    }
-    EXPECT_TRUE(Found);
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
-  auto *Allocator = this->Allocator.get();
-
-  std::vector<void *> Ptrs(65536);
-
-  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);
-
-  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);
-
-  // Test that when mem-init is disabled on a thread, calloc still works as
-  // expected. This is tricky to ensure when MTE is enabled, so this test tries
-  // to exercise the relevant code on our MTE path.
-  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
-    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
-    const scudo::uptr Size =
-        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
-    if (Size < 8)
-      continue;
-    for (unsigned I = 0; I != Ptrs.size(); ++I) {
-      Ptrs[I] = Allocator->allocate(Size, Origin);
-      memset(Ptrs[I], 0xaa, Size);
-    }
-    for (unsigned I = 0; I != Ptrs.size(); ++I)
-      Allocator->deallocate(Ptrs[I], Origin, Size);
-    for (unsigned I = 0; I != Ptrs.size(); ++I) {
-      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
-      memset(Ptrs[I], 0xbb, Size - 8);
-    }
-    for (unsigned I = 0; I != Ptrs.size(); ++I)
-      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
-    for (unsigned I = 0; I != Ptrs.size(); ++I) {
-      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
-      for (scudo::uptr J = 0; J < Size; ++J)
-        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
-    }
-  }
-
-  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
-  auto *Allocator = this->Allocator.get();
-
-  // Regression test: make realloc-in-place happen at the rightmost end of a
-  // mapped region.
-  constexpr size_t nPtrs = 10000;
-  for (scudo::uptr i = 1; i < 32; ++i) {
-    scudo::uptr Size = 16 * i - 1;
-    std::vector<void *> Ptrs;
-    for (size_t i = 0; i < nPtrs; ++i) {
-      void *P = Allocator->allocate(Size, Origin);
-      P = Allocator->reallocate(P, Size + 1);
-      Ptrs.push_back(P);
-    }
-
-    for (size_t i = 0; i < nPtrs; ++i)
-      Allocator->deallocate(Ptrs[i], Origin);
-  }
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
-  auto *Allocator = this->Allocator.get();
-  auto Size = Allocator->getRingBufferSize();
-  if (Size > 0)
-    EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
-  auto *Allocator = this->Allocator.get();
-  auto *Addr = Allocator->getRingBufferAddress();
-  EXPECT_NE(Addr, nullptr);
-  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
-}
-
-#if SCUDO_CAN_USE_PRIMARY64
-#if SCUDO_TRUSTY
-
-// TrustyConfig is designed for a domain-specific allocator. Add a basic test
-// which covers only simple operations and ensures the configuration is able
-// to compile.
-TEST(ScudoCombinedTest, BasicTrustyConfig) {
-  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
-  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
-
-  for (scudo::uptr ClassId = 1U;
-       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
-       ClassId++) {
-    const scudo::uptr Size =
-        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
-    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
-    ASSERT_NE(p, nullptr);
-    free(p);
-  }
-
-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  TSD->getCache().drain();
-
-  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-}
-
-#endif
-#endif
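Annotation: the Stats test in this file uses a report-then-grow pattern, where
the API returns the size it actually needs and the caller enlarges the buffer
until the report fits. A standalone sketch with a hypothetical fakeGetStats
standing in for Allocator->getStats():

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical reporter: copies what fits and returns the size it needs.
static size_t fakeGetStats(char *Buf, size_t BufSize) {
  static const char Report[] =
      "Stats: SizeClassAllocator / MapAllocator / Quarantine";
  const size_t Needed = sizeof(Report); // includes the terminating '\0'
  std::memcpy(Buf, Report, Needed < BufSize ? Needed : BufSize);
  return Needed;
}

int main() {
  size_t BufferSize = 8;
  std::vector<char> Buffer(BufferSize);
  size_t ActualSize = fakeGetStats(Buffer.data(), BufferSize);
  // Grow until the report fits, exactly as the Stats test does.
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = fakeGetStats(Buffer.data(), BufferSize);
  }
  std::printf("%s\n", Buffer.data());
}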
diff --git a/Telegram/ThirdParty/scudo/tests/common_test.cpp b/Telegram/ThirdParty/scudo/tests/common_test.cpp
deleted file mode 100644
index fff7c662a..000000000
--- a/Telegram/ThirdParty/scudo/tests/common_test.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-//===-- common_test.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "internal_defs.h"
-#include "tests/scudo_unit_test.h"
-
-#include "common.h"
-#include "mem_map.h"
-#include <algorithm>
-#include <fstream>
-
-namespace scudo {
-
-static uptr getResidentMemorySize() {
-  if (!SCUDO_LINUX)
-    UNREACHABLE("Not implemented!");
-  uptr Size;
-  uptr Resident;
-  std::ifstream IFS("/proc/self/statm");
-  IFS >> Size;
-  IFS >> Resident;
-  return Resident * getPageSizeCached();
-}
-
-// Fuchsia needs getResidentMemorySize implementation.
-TEST(ScudoCommonTest, SKIP_ON_FUCHSIA(ResidentMemorySize)) {
-  uptr OnStart = getResidentMemorySize();
-  EXPECT_GT(OnStart, 0UL);
-
-  const uptr Size = 1ull << 30;
-  const uptr Threshold = Size >> 3;
-
-  MemMapT MemMap;
-  ASSERT_TRUE(MemMap.map(/*Addr=*/0U, Size, "ResidentMemorySize"));
-  ASSERT_NE(MemMap.getBase(), 0U);
-  void *P = reinterpret_cast<void *>(MemMap.getBase());
-  EXPECT_LT(getResidentMemorySize(), OnStart + Threshold);
-
-  memset(P, 1, Size);
-  EXPECT_GT(getResidentMemorySize(), OnStart + Size - Threshold);
-
-  MemMap.releasePagesToOS(MemMap.getBase(), Size);
-  EXPECT_LT(getResidentMemorySize(), OnStart + Threshold);
-
-  memset(P, 1, Size);
-  EXPECT_GT(getResidentMemorySize(), OnStart + Size - Threshold);
-
-  MemMap.unmap(MemMap.getBase(), Size);
-}
-
-TEST(ScudoCommonTest, Zeros) {
-  const uptr Size = 1ull << 20;
-
-  MemMapT MemMap;
-  ASSERT_TRUE(MemMap.map(/*Addr=*/0U, Size, "Zeros"));
-  ASSERT_NE(MemMap.getBase(), 0U);
-  uptr *P = reinterpret_cast<uptr *>(MemMap.getBase());
-  const ptrdiff_t N = Size / sizeof(uptr);
-  EXPECT_EQ(std::count(P, P + N, 0), N);
-
-  memset(P, 1, Size);
-  EXPECT_EQ(std::count(P, P + N, 0), 0);
-
-  MemMap.releasePagesToOS(MemMap.getBase(), Size);
-  EXPECT_EQ(std::count(P, P + N, 0), N);
-
-  MemMap.unmap(MemMap.getBase(), Size);
-}
-
-} // namespace scudo
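Annotation: getResidentMemorySize above parses /proc/self/statm, whose first
two fields are the total and resident page counts. The same probe as a
standalone, Linux-only program:

#include <cstdio>
#include <fstream>
#include <unistd.h>

int main() {
  // /proc/self/statm fields (in pages): size resident shared text lib data dt.
  unsigned long SizePages = 0, ResidentPages = 0;
  std::ifstream IFS("/proc/self/statm");
  IFS >> SizePages >> ResidentPages;
  const long PageSize = sysconf(_SC_PAGESIZE);
  std::printf("resident: %lu bytes\n",
              ResidentPages * static_cast<unsigned long>(PageSize));
}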
diff --git a/Telegram/ThirdParty/scudo/tests/condition_variable_test.cpp b/Telegram/ThirdParty/scudo/tests/condition_variable_test.cpp
deleted file mode 100644
index caba1f64a..000000000
--- a/Telegram/ThirdParty/scudo/tests/condition_variable_test.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- condition_variable_test.cpp -----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "common.h"
-#include "condition_variable.h"
-#include "mutex.h"
-
-#include <thread>
-
-template <typename ConditionVariableT> void simpleWaitAndNotifyAll() {
-  constexpr scudo::u32 NumThreads = 2;
-  constexpr scudo::u32 CounterMax = 1024;
-  std::thread Threads[NumThreads];
-
-  scudo::HybridMutex M;
-  ConditionVariableT CV;
-  CV.bindTestOnly(M);
-  scudo::u32 Counter = 0;
-
-  for (scudo::u32 I = 0; I < NumThreads; ++I) {
-    Threads[I] = std::thread(
-        [&](scudo::u32 Id) {
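-          // Threads take turns: a thread increments the counter only when
-          // Counter % NumThreads equals its Id, waiting otherwise.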
-          do {
-            scudo::ScopedLock L(M);
-            if (Counter % NumThreads != Id && Counter < CounterMax)
-              CV.wait(M);
-            if (Counter >= CounterMax) {
-              break;
-            } else {
-              ++Counter;
-              CV.notifyAll(M);
-            }
-          } while (true);
-        },
-        I);
-  }
-
-  for (std::thread &T : Threads)
-    T.join();
-
-  EXPECT_EQ(Counter, CounterMax);
-}
-
-TEST(ScudoConditionVariableTest, DummyCVWaitAndNotifyAll) {
-  simpleWaitAndNotifyAll<scudo::ConditionVariableDummy>();
-}
-
-#ifdef SCUDO_LINUX
-TEST(ScudoConditionVariableTest, LinuxCVWaitAndNotifyAll) {
-  simpleWaitAndNotifyAll<scudo::ConditionVariableLinux>();
-}
-#endif
diff --git a/Telegram/ThirdParty/scudo/tests/flags_test.cpp b/Telegram/ThirdParty/scudo/tests/flags_test.cpp
deleted file mode 100644
index 0205052ed..000000000
--- a/Telegram/ThirdParty/scudo/tests/flags_test.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-//===-- flags_test.cpp ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "flags.h"
-#include "flags_parser.h"
-
-#include <string.h>
-
-static const char FlagName[] = "flag_name";
-static const char FlagDesc[] = "flag description";
-
-template <typename T>
-static void testFlag(scudo::FlagType Type, T StartValue, const char *Env,
-                     T FinalValue) {
-  scudo::FlagParser Parser;
-  T Flag = StartValue;
-  Parser.registerFlag(FlagName, FlagDesc, Type, &Flag);
-  Parser.parseString(Env);
-  EXPECT_EQ(FinalValue, Flag);
-  // Reporting unrecognized flags is needed to reset them.
-  scudo::reportUnrecognizedFlags();
-}
-
-TEST(ScudoFlagsTest, BooleanFlags) {
-  testFlag(scudo::FlagType::FT_bool, false, "flag_name=1", true);
-  testFlag(scudo::FlagType::FT_bool, false, "flag_name=yes", true);
-  testFlag(scudo::FlagType::FT_bool, false, "flag_name='yes'", true);
-  testFlag(scudo::FlagType::FT_bool, false, "flag_name=true", true);
-  testFlag(scudo::FlagType::FT_bool, true, "flag_name=0", false);
-  testFlag(scudo::FlagType::FT_bool, true, "flag_name=\"0\"", false);
-  testFlag(scudo::FlagType::FT_bool, true, "flag_name=no", false);
-  testFlag(scudo::FlagType::FT_bool, true, "flag_name=false", false);
-  testFlag(scudo::FlagType::FT_bool, true, "flag_name='false'", false);
-}
-
-TEST(ScudoFlagsDeathTest, BooleanFlags) {
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name", true),
-               "expected '='");
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=", true),
-               "invalid value for bool option: ''");
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=2", true),
-               "invalid value for bool option: '2'");
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=-1", true),
-               "invalid value for bool option: '-1'");
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=on", true),
-               "invalid value for bool option: 'on'");
-}
-
-TEST(ScudoFlagsTest, IntFlags) {
-  testFlag(scudo::FlagType::FT_int, -11, nullptr, -11);
-  testFlag(scudo::FlagType::FT_int, -11, "flag_name=0", 0);
-  testFlag(scudo::FlagType::FT_int, -11, "flag_name='0'", 0);
-  testFlag(scudo::FlagType::FT_int, -11, "flag_name=42", 42);
-  testFlag(scudo::FlagType::FT_int, -11, "flag_name=-42", -42);
-  testFlag(scudo::FlagType::FT_int, -11, "flag_name=\"-42\"", -42);
-
-  // Unrecognized flags are ignored.
-  testFlag(scudo::FlagType::FT_int, -11, "--flag_name=42", -11);
-  testFlag(scudo::FlagType::FT_int, -11, "zzzzzzz=42", -11);
-}
-
-TEST(ScudoFlagsDeathTest, IntFlags) {
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_int, -11, "flag_name", 0),
-               "expected '='");
-  EXPECT_DEATH(testFlag(scudo::FlagType::FT_int, -11, "flag_name=42U", 0),
-               "invalid value for int option");
-}
-
-static void testTwoFlags(const char *Env, bool ExpectedFlag1,
-                         const int ExpectedFlag2, const char *Name1 = "flag1",
-                         const char *Name2 = "flag2") {
-  scudo::FlagParser Parser;
-  bool Flag1 = !ExpectedFlag1;
-  int Flag2;
-  Parser.registerFlag(Name1, FlagDesc, scudo::FlagType::FT_bool, &Flag1);
-  Parser.registerFlag(Name2, FlagDesc, scudo::FlagType::FT_int, &Flag2);
-  Parser.parseString(Env);
-  EXPECT_EQ(ExpectedFlag1, Flag1);
-  EXPECT_EQ(Flag2, ExpectedFlag2);
-  // Reporting unrecognized flags is needed to reset them.
-  scudo::reportUnrecognizedFlags();
-}
-
-TEST(ScudoFlagsTest, MultipleFlags) {
-  testTwoFlags("flag1=1 flag2=42", true, 42);
-  testTwoFlags("flag2=-1 flag1=0", false, -1);
-  testTwoFlags("flag1=false:flag2=1337", false, 1337);
-  testTwoFlags("flag2=42:flag1=yes", true, 42);
-  testTwoFlags("flag2=42\nflag1=yes", true, 42);
-  testTwoFlags("flag2=42\r\nflag1=yes", true, 42);
-  testTwoFlags("flag2=42\tflag1=yes", true, 42);
-}
-
-TEST(ScudoFlagsTest, CommonSuffixFlags) {
-  testTwoFlags("flag=1 other_flag=42", true, 42, "flag", "other_flag");
-  testTwoFlags("other_flag=42 flag=1", true, 42, "flag", "other_flag");
-}
-
-TEST(ScudoFlagsTest, AllocatorFlags) {
-  scudo::FlagParser Parser;
-  scudo::Flags Flags;
-  scudo::registerFlags(&Parser, &Flags);
-  Flags.setDefaults();
-  Flags.dealloc_type_mismatch = false;
-  Flags.delete_size_mismatch = false;
-  Flags.quarantine_max_chunk_size = 1024;
-  Parser.parseString("dealloc_type_mismatch=true:delete_size_mismatch=true:"
-                     "quarantine_max_chunk_size=2048");
-  EXPECT_TRUE(Flags.dealloc_type_mismatch);
-  EXPECT_TRUE(Flags.delete_size_mismatch);
-  EXPECT_EQ(2048, Flags.quarantine_max_chunk_size);
-}
-
-#ifdef GWP_ASAN_HOOKS
-TEST(ScudoFlagsTest, GWPASanFlags) {
-  scudo::FlagParser Parser;
-  scudo::Flags Flags;
-  scudo::registerFlags(&Parser, &Flags);
-  Flags.setDefaults();
-  Flags.GWP_ASAN_Enabled = false;
-  Parser.parseString("GWP_ASAN_Enabled=true:GWP_ASAN_SampleRate=1:"
-                     "GWP_ASAN_InstallSignalHandlers=false");
-  EXPECT_TRUE(Flags.GWP_ASAN_Enabled);
-  EXPECT_FALSE(Flags.GWP_ASAN_InstallSignalHandlers);
-  EXPECT_EQ(1, Flags.GWP_ASAN_SampleRate);
-}
-#endif // GWP_ASAN_HOOKS
diff --git a/Telegram/ThirdParty/scudo/tests/list_test.cpp b/Telegram/ThirdParty/scudo/tests/list_test.cpp
deleted file mode 100644
index 140ca027a..000000000
--- a/Telegram/ThirdParty/scudo/tests/list_test.cpp
+++ /dev/null
@@ -1,216 +0,0 @@
-//===-- list_test.cpp -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "list.h"
-
-struct ListItem {
-  ListItem *Next;
-  ListItem *Prev;
-};
-
-static ListItem Items[6];
-static ListItem *X = &Items[0];
-static ListItem *Y = &Items[1];
-static ListItem *Z = &Items[2];
-static ListItem *A = &Items[3];
-static ListItem *B = &Items[4];
-static ListItem *C = &Items[5];
-
-typedef scudo::SinglyLinkedList<ListItem> SLList;
-typedef scudo::DoublyLinkedList<ListItem> DLList;
-
-template <typename ListT>
-static void setList(ListT *L, ListItem *I1 = nullptr, ListItem *I2 = nullptr,
-                    ListItem *I3 = nullptr) {
-  L->clear();
-  if (I1)
-    L->push_back(I1);
-  if (I2)
-    L->push_back(I2);
-  if (I3)
-    L->push_back(I3);
-}
-
-template <typename ListT>
-static void checkList(ListT *L, ListItem *I1, ListItem *I2 = nullptr,
-                      ListItem *I3 = nullptr, ListItem *I4 = nullptr,
-                      ListItem *I5 = nullptr, ListItem *I6 = nullptr) {
-  if (I1) {
-    EXPECT_EQ(L->front(), I1);
-    L->pop_front();
-  }
-  if (I2) {
-    EXPECT_EQ(L->front(), I2);
-    L->pop_front();
-  }
-  if (I3) {
-    EXPECT_EQ(L->front(), I3);
-    L->pop_front();
-  }
-  if (I4) {
-    EXPECT_EQ(L->front(), I4);
-    L->pop_front();
-  }
-  if (I5) {
-    EXPECT_EQ(L->front(), I5);
-    L->pop_front();
-  }
-  if (I6) {
-    EXPECT_EQ(L->front(), I6);
-    L->pop_front();
-  }
-  EXPECT_TRUE(L->empty());
-}
-
-template <typename ListT> static void testListCommon(void) {
-  ListT L;
-  L.clear();
-
-  EXPECT_EQ(L.size(), 0U);
-  L.push_back(X);
-  EXPECT_EQ(L.size(), 1U);
-  EXPECT_EQ(L.back(), X);
-  EXPECT_EQ(L.front(), X);
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-  L.checkConsistency();
-
-  L.push_front(X);
-  EXPECT_EQ(L.size(), 1U);
-  EXPECT_EQ(L.back(), X);
-  EXPECT_EQ(L.front(), X);
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-  L.checkConsistency();
-
-  L.push_front(X);
-  L.push_front(Y);
-  L.push_front(Z);
-  EXPECT_EQ(L.size(), 3U);
-  EXPECT_EQ(L.front(), Z);
-  EXPECT_EQ(L.back(), X);
-  L.checkConsistency();
-
-  L.pop_front();
-  EXPECT_EQ(L.size(), 2U);
-  EXPECT_EQ(L.front(), Y);
-  EXPECT_EQ(L.back(), X);
-  L.pop_front();
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-  L.checkConsistency();
-
-  L.push_back(X);
-  L.push_back(Y);
-  L.push_back(Z);
-  EXPECT_EQ(L.size(), 3U);
-  EXPECT_EQ(L.front(), X);
-  EXPECT_EQ(L.back(), Z);
-  L.checkConsistency();
-
-  L.pop_front();
-  EXPECT_EQ(L.size(), 2U);
-  EXPECT_EQ(L.front(), Y);
-  EXPECT_EQ(L.back(), Z);
-  L.pop_front();
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-  L.checkConsistency();
-}
-
-TEST(ScudoListTest, LinkedListCommon) {
-  testListCommon<SLList>();
-  testListCommon<DLList>();
-}
-
-TEST(ScudoListTest, SinglyLinkedList) {
-  SLList L;
-  L.clear();
-
-  L.push_back(X);
-  L.push_back(Y);
-  L.push_back(Z);
-  L.extract(X, Y);
-  EXPECT_EQ(L.size(), 2U);
-  EXPECT_EQ(L.front(), X);
-  EXPECT_EQ(L.back(), Z);
-  L.checkConsistency();
-  L.extract(X, Z);
-  EXPECT_EQ(L.size(), 1U);
-  EXPECT_EQ(L.front(), X);
-  EXPECT_EQ(L.back(), X);
-  L.checkConsistency();
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-
-  SLList L1, L2;
-  L1.clear();
-  L2.clear();
-
-  L1.append_back(&L2);
-  EXPECT_TRUE(L1.empty());
-  EXPECT_TRUE(L2.empty());
-
-  setList(&L1, X);
-  checkList(&L1, X);
-
-  setList(&L1, X, Y);
-  L1.insert(X, Z);
-  checkList(&L1, X, Z, Y);
-
-  setList(&L1, X, Y, Z);
-  setList(&L2, A, B, C);
-  L1.append_back(&L2);
-  checkList(&L1, X, Y, Z, A, B, C);
-  EXPECT_TRUE(L2.empty());
-
-  L1.clear();
-  L2.clear();
-  L1.push_back(X);
-  L1.append_back(&L2);
-  EXPECT_EQ(L1.back(), X);
-  EXPECT_EQ(L1.front(), X);
-  EXPECT_EQ(L1.size(), 1U);
-}
-
-TEST(ScudoListTest, DoublyLinkedList) {
-  DLList L;
-  L.clear();
-
-  L.push_back(X);
-  L.push_back(Y);
-  L.push_back(Z);
-  L.remove(Y);
-  EXPECT_EQ(L.size(), 2U);
-  EXPECT_EQ(L.front(), X);
-  EXPECT_EQ(L.back(), Z);
-  L.checkConsistency();
-  L.remove(Z);
-  EXPECT_EQ(L.size(), 1U);
-  EXPECT_EQ(L.front(), X);
-  EXPECT_EQ(L.back(), X);
-  L.checkConsistency();
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-
-  L.push_back(X);
-  L.insert(Y, X);
-  EXPECT_EQ(L.size(), 2U);
-  EXPECT_EQ(L.front(), Y);
-  EXPECT_EQ(L.back(), X);
-  L.checkConsistency();
-  L.remove(Y);
-  EXPECT_EQ(L.size(), 1U);
-  EXPECT_EQ(L.front(), X);
-  EXPECT_EQ(L.back(), X);
-  L.checkConsistency();
-  L.pop_front();
-  EXPECT_TRUE(L.empty());
-}
diff --git a/Telegram/ThirdParty/scudo/tests/map_test.cpp b/Telegram/ThirdParty/scudo/tests/map_test.cpp
deleted file mode 100644
index 06a56f848..000000000
--- a/Telegram/ThirdParty/scudo/tests/map_test.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-//===-- map_test.cpp --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "common.h"
-#include "mem_map.h"
-
-#include <string.h>
-#include <unistd.h>
-
-static const char *MappingName = "scudo:test";
-
-TEST(ScudoMapTest, PageSize) {
-  EXPECT_EQ(scudo::getPageSizeCached(),
-            static_cast<scudo::uptr>(sysconf(_SC_PAGESIZE)));
-}
-
-TEST(ScudoMapDeathTest, MapNoAccessUnmap) {
-  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  scudo::ReservedMemoryT ReservedMemory;
-
-  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, MappingName));
-  EXPECT_NE(ReservedMemory.getBase(), 0U);
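-  // Reserved-but-undispatched memory carries no access permissions, so any
-  // write to it must crash.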
-  EXPECT_DEATH(
-      memset(reinterpret_cast<void *>(ReservedMemory.getBase()), 0xaa, Size),
-      "");
-
-  ReservedMemory.release();
-}
-
-TEST(ScudoMapDeathTest, MapUnmap) {
-  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  EXPECT_DEATH(
-      {
-        // Repeat a few times to avoid missing the crash in case the range
-        // gets mmapped again by unrelated code.
-        for (int i = 0; i < 10; ++i) {
-          scudo::MemMapT MemMap;
-          MemMap.map(/*Addr=*/0U, Size, MappingName);
-          scudo::uptr P = MemMap.getBase();
-          if (P == 0U)
-            continue;
-          MemMap.unmap(MemMap.getBase(), Size);
-          memset(reinterpret_cast<void *>(P), 0xbb, Size);
-        }
-      },
-      "");
-}
-
-TEST(ScudoMapDeathTest, MapWithGuardUnmap) {
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  const scudo::uptr Size = 4 * PageSize;
-  scudo::ReservedMemoryT ReservedMemory;
-  ASSERT_TRUE(
-      ReservedMemory.create(/*Addr=*/0U, Size + 2 * PageSize, MappingName));
-  ASSERT_NE(ReservedMemory.getBase(), 0U);
-
-  scudo::MemMapT MemMap =
-      ReservedMemory.dispatch(ReservedMemory.getBase(), Size + 2 * PageSize);
-  ASSERT_TRUE(MemMap.isAllocated());
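-  // Remap only the middle Size bytes, leaving an inaccessible guard page on
-  // each side; the one-byte overrun below must hit the right-hand guard.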
-  scudo::uptr Q = MemMap.getBase() + PageSize;
-  ASSERT_TRUE(MemMap.remap(Q, Size, MappingName));
-  memset(reinterpret_cast<void *>(Q), 0xaa, Size);
-  EXPECT_DEATH(memset(reinterpret_cast<void *>(Q), 0xaa, Size + 1), "");
-  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-}
-
-TEST(ScudoMapTest, MapGrowUnmap) {
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  const scudo::uptr Size = 4 * PageSize;
-  scudo::ReservedMemoryT ReservedMemory;
-  ReservedMemory.create(/*Addr=*/0U, Size, MappingName);
-  ASSERT_TRUE(ReservedMemory.isCreated());
-
-  scudo::MemMapT MemMap =
-      ReservedMemory.dispatch(ReservedMemory.getBase(), Size);
-  ASSERT_TRUE(MemMap.isAllocated());
-  scudo::uptr Q = MemMap.getBase() + PageSize;
-  ASSERT_TRUE(MemMap.remap(Q, PageSize, MappingName));
-  memset(reinterpret_cast<void *>(Q), 0xaa, PageSize);
-  Q += PageSize;
-  ASSERT_TRUE(MemMap.remap(Q, PageSize, MappingName));
-  memset(reinterpret_cast<void *>(Q), 0xbb, PageSize);
-  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-}
diff --git a/Telegram/ThirdParty/scudo/tests/memtag_test.cpp b/Telegram/ThirdParty/scudo/tests/memtag_test.cpp
deleted file mode 100644
index fd277f962..000000000
--- a/Telegram/ThirdParty/scudo/tests/memtag_test.cpp
+++ /dev/null
@@ -1,213 +0,0 @@
-//===-- memtag_test.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "common.h"
-#include "mem_map.h"
-#include "memtag.h"
-#include "platform.h"
-#include "tests/scudo_unit_test.h"
-
-#if SCUDO_LINUX
-namespace scudo {
-
-TEST(MemtagBasicDeathTest, Unsupported) {
-  if (archSupportsMemoryTagging())
-    GTEST_SKIP();
-
-  EXPECT_DEATH(archMemoryTagGranuleSize(), "not supported");
-  EXPECT_DEATH(untagPointer((uptr)0), "not supported");
-  EXPECT_DEATH(extractTag((uptr)0), "not supported");
-
-  EXPECT_DEATH(systemSupportsMemoryTagging(), "not supported");
-  EXPECT_DEATH(systemDetectsMemoryTagFaultsTestOnly(), "not supported");
-  EXPECT_DEATH(enableSystemMemoryTaggingTestOnly(), "not supported");
-
-  EXPECT_DEATH(selectRandomTag((uptr)0, 0), "not supported");
-  EXPECT_DEATH(addFixedTag((uptr)0, 1), "not supported");
-  EXPECT_DEATH(storeTags((uptr)0, (uptr)0 + sizeof(0)), "not supported");
-  EXPECT_DEATH(storeTag((uptr)0), "not supported");
-  EXPECT_DEATH(loadTag((uptr)0), "not supported");
-
-  EXPECT_DEATH(setRandomTag(nullptr, 64, 0, nullptr, nullptr), "not supported");
-  EXPECT_DEATH(untagPointer(nullptr), "not supported");
-  EXPECT_DEATH(loadTag(nullptr), "not supported");
-  EXPECT_DEATH(addFixedTag(nullptr, 0), "not supported");
-}
-
-class MemtagTest : public Test {
-protected:
-  void SetUp() override {
-    if (!archSupportsMemoryTagging() || !systemDetectsMemoryTagFaultsTestOnly())
-      GTEST_SKIP() << "Memory tagging is not supported";
-
-    BufferSize = getPageSizeCached();
-    ASSERT_FALSE(MemMap.isAllocated());
-    ASSERT_TRUE(MemMap.map(/*Addr=*/0U, BufferSize, "MemtagTest", MAP_MEMTAG));
-    ASSERT_NE(MemMap.getBase(), 0U);
-    Addr = MemMap.getBase();
-    Buffer = reinterpret_cast<u8 *>(Addr);
-    EXPECT_TRUE(isAligned(Addr, archMemoryTagGranuleSize()));
-    EXPECT_EQ(Addr, untagPointer(Addr));
-  }
-
-  void TearDown() override {
-    if (Buffer) {
-      ASSERT_TRUE(MemMap.isAllocated());
-      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-    }
-  }
-
-  uptr BufferSize = 0;
-  scudo::MemMapT MemMap = {};
-  u8 *Buffer = nullptr;
-  uptr Addr = 0;
-};
-
-using MemtagDeathTest = MemtagTest;
-
-TEST_F(MemtagTest, ArchMemoryTagGranuleSize) {
-  EXPECT_GT(archMemoryTagGranuleSize(), 1u);
-  EXPECT_TRUE(isPowerOfTwo(archMemoryTagGranuleSize()));
-}
-
-TEST_F(MemtagTest, ExtractTag) {
-// The test is already skipped on anything other than 64 bit. But
-// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
-#if defined(__LP64__)
-  uptr Tags = 0;
-  // Try every value for the top byte and check that the extracted tag
-  // values cover the full expected range (all 16 of them).
-  for (u64 Top = 0; Top < 0x100; ++Top)
-    Tags = Tags | (1u << extractTag(Addr | (Top << 56)));
-  EXPECT_EQ(0xffffull, Tags);
-#endif
-}
-
-TEST_F(MemtagDeathTest, AddFixedTag) {
-  for (uptr Tag = 0; Tag < 0x10; ++Tag)
-    EXPECT_EQ(Tag, extractTag(addFixedTag(Addr, Tag)));
-  if (SCUDO_DEBUG) {
-    EXPECT_DEATH(addFixedTag(Addr, 16), "");
-    EXPECT_DEATH(addFixedTag(~Addr, 0), "");
-  }
-}
-
-TEST_F(MemtagTest, UntagPointer) {
-  uptr UnTagMask = untagPointer(~uptr(0));
-  for (u64 Top = 0; Top < 0x100; ++Top) {
-    uptr Ptr = (Addr | (Top << 56)) & UnTagMask;
-    EXPECT_EQ(addFixedTag(Ptr, 0), untagPointer(Ptr));
-  }
-}
-
-TEST_F(MemtagDeathTest, ScopedDisableMemoryTagChecks) {
-  u8 *P = reinterpret_cast<u8 *>(addFixedTag(Addr, 1));
-  EXPECT_NE(P, Buffer);
-
-  EXPECT_DEATH(*P = 20, "");
-  ScopedDisableMemoryTagChecks Disable;
-  *P = 10;
-}
-
-TEST_F(MemtagTest, SelectRandomTag) {
-  for (uptr SrcTag = 0; SrcTag < 0x10; ++SrcTag) {
-    uptr Ptr = addFixedTag(Addr, SrcTag);
-    uptr Tags = 0;
-    for (uptr I = 0; I < 100000; ++I)
-      Tags = Tags | (1u << extractTag(selectRandomTag(Ptr, 0)));
-    // std::popcount is C++20, so count the bits manually.
-    int PopCnt = 0;
-    while (Tags) {
-      PopCnt += Tags & 1;
-      Tags >>= 1;
-    }
-    // Random tags are not always very random, and this test is not about PRNG
-    // quality.  Anything above half would be satisfactory.
-    EXPECT_GE(PopCnt, 8);
-  }
-}
-
-TEST_F(MemtagTest, SelectRandomTagWithMask) {
-// The test is already skipped on anything other than 64 bit. But
-// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
-#if defined(__LP64__)
-  for (uptr j = 0; j < 32; ++j) {
-    for (uptr i = 0; i < 1000; ++i)
-      EXPECT_NE(j, extractTag(selectRandomTag(Addr, 1ull << j)));
-  }
-#endif
-}
-
-TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(LoadStoreTagUnaligned)) {
-  for (uptr P = Addr; P < Addr + 4 * archMemoryTagGranuleSize(); ++P) {
-    if (P % archMemoryTagGranuleSize() == 0)
-      continue;
-    EXPECT_DEATH(loadTag(P), "");
-    EXPECT_DEATH(storeTag(P), "");
-  }
-}
-
-TEST_F(MemtagTest, LoadStoreTag) {
-  uptr Base = Addr + 0x100;
-  uptr Tagged = addFixedTag(Base, 7);
-  storeTag(Tagged);
-
-  EXPECT_EQ(Base - archMemoryTagGranuleSize(),
-            loadTag(Base - archMemoryTagGranuleSize()));
-  EXPECT_EQ(Tagged, loadTag(Base));
-  EXPECT_EQ(Base + archMemoryTagGranuleSize(),
-            loadTag(Base + archMemoryTagGranuleSize()));
-}
-
-TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(StoreTagsUnaligned)) {
-  for (uptr P = Addr; P < Addr + 4 * archMemoryTagGranuleSize(); ++P) {
-    uptr Tagged = addFixedTag(P, 5);
-    if (Tagged % archMemoryTagGranuleSize() == 0)
-      continue;
-    EXPECT_DEATH(storeTags(Tagged, Tagged), "");
-  }
-}
-
-TEST_F(MemtagTest, StoreTags) {
-// The test is already skipped on anything other than 64 bit. But
-// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
-#if defined(__LP64__)
-  const uptr MaxTaggedSize = 4 * archMemoryTagGranuleSize();
-  for (uptr Size = 0; Size <= MaxTaggedSize; ++Size) {
-    uptr NoTagBegin = Addr + archMemoryTagGranuleSize();
-    uptr NoTagEnd = NoTagBegin + Size;
-
-    u8 Tag = 5;
-
-    uptr TaggedBegin = addFixedTag(NoTagBegin, Tag);
-    uptr TaggedEnd = addFixedTag(NoTagEnd, Tag);
-
-    EXPECT_EQ(roundUp(TaggedEnd, archMemoryTagGranuleSize()),
-              storeTags(TaggedBegin, TaggedEnd));
-
-    uptr LoadPtr = Addr;
-    // Untagged left granule.
-    EXPECT_EQ(LoadPtr, loadTag(LoadPtr));
-
-    for (LoadPtr += archMemoryTagGranuleSize(); LoadPtr < NoTagEnd;
-         LoadPtr += archMemoryTagGranuleSize()) {
-      EXPECT_EQ(addFixedTag(LoadPtr, 5), loadTag(LoadPtr));
-    }
-
-    // Untagged right granule.
-    EXPECT_EQ(LoadPtr, loadTag(LoadPtr));
-
-    // Reset tags without using StoreTags.
-    MemMap.releasePagesToOS(Addr, BufferSize);
-  }
-#endif
-}
-
-} // namespace scudo
-
-#endif
diff --git a/Telegram/ThirdParty/scudo/tests/mutex_test.cpp b/Telegram/ThirdParty/scudo/tests/mutex_test.cpp
deleted file mode 100644
index c3efeab82..000000000
--- a/Telegram/ThirdParty/scudo/tests/mutex_test.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-//===-- mutex_test.cpp ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "mutex.h"
-
-#include <pthread.h>
-#include <string.h>
-
-class TestData {
-public:
-  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
-    for (scudo::u32 I = 0; I < Size; I++)
-      Data[I] = 0;
-  }
-
-  void write() {
-    scudo::ScopedLock L(Mutex);
-    T V0 = Data[0];
-    for (scudo::u32 I = 0; I < Size; I++) {
-      EXPECT_EQ(Data[I], V0);
-      Data[I]++;
-    }
-  }
-
-  void tryWrite() {
-    if (!Mutex.tryLock())
-      return;
-    T V0 = Data[0];
-    for (scudo::u32 I = 0; I < Size; I++) {
-      EXPECT_EQ(Data[I], V0);
-      Data[I]++;
-    }
-    Mutex.unlock();
-  }
-
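-  // Burns a few cycles outside the lock so the threads interleave rather
-  // than immediately re-acquiring the mutex.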
-  void backoff() {
-    volatile T LocalData[Size] = {};
-    for (scudo::u32 I = 0; I < Size; I++) {
-      LocalData[I] = LocalData[I] + 1;
-      EXPECT_EQ(LocalData[I], 1U);
-    }
-  }
-
-private:
-  static const scudo::u32 Size = 64U;
-  typedef scudo::u64 T;
-  scudo::HybridMutex &Mutex;
-  alignas(SCUDO_CACHE_LINE_SIZE) T Data[Size];
-};
-
-const scudo::u32 NumberOfThreads = 8;
-#if SCUDO_DEBUG
-const scudo::u32 NumberOfIterations = 4 * 1024;
-#else
-const scudo::u32 NumberOfIterations = 16 * 1024;
-#endif
-
-static void *lockThread(void *Param) {
-  TestData *Data = reinterpret_cast<TestData *>(Param);
-  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
-    Data->write();
-    Data->backoff();
-  }
-  return 0;
-}
-
-static void *tryThread(void *Param) {
-  TestData *Data = reinterpret_cast<TestData *>(Param);
-  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
-    Data->tryWrite();
-    Data->backoff();
-  }
-  return 0;
-}
-
-TEST(ScudoMutexTest, Mutex) {
-  scudo::HybridMutex M;
-  TestData Data(M);
-  pthread_t Threads[NumberOfThreads];
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread, &Data);
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, MutexTry) {
-  scudo::HybridMutex M;
-  TestData Data(M);
-  pthread_t Threads[NumberOfThreads];
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, tryThread, &Data);
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, MutexAssertHeld) {
-  scudo::HybridMutex M;
-  M.lock();
-  M.assertHeld();
-  M.unlock();
-}
diff --git a/Telegram/ThirdParty/scudo/tests/primary_test.cpp b/Telegram/ThirdParty/scudo/tests/primary_test.cpp
deleted file mode 100644
index 181715117..000000000
--- a/Telegram/ThirdParty/scudo/tests/primary_test.cpp
+++ /dev/null
@@ -1,441 +0,0 @@
-//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "allocator_config.h"
-#include "condition_variable.h"
-#include "primary32.h"
-#include "primary64.h"
-#include "size_class_map.h"
-
-#include <algorithm>
-#include <chrono>
-#include <condition_variable>
-#include <mutex>
-#include <random>
-#include <stdlib.h>
-#include <thread>
-#include <vector>
-
-// Note that with small enough regions, the SizeClassAllocator64 also works on
-// 32-bit architectures. It's not something we want to encourage, but we still
-// should ensure the tests pass.
-
-template <typename SizeClassMapT> struct TestConfig1 {
-  static const bool MaySupportMemoryTagging = false;
-
-  struct Primary {
-    using SizeClassMap = SizeClassMapT;
-    static const scudo::uptr RegionSizeLog = 18U;
-    static const scudo::uptr GroupSizeLog = 18U;
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    typedef scudo::uptr CompactPtrT;
-    static const scudo::uptr CompactPtrScale = 0;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-  };
-};
-
-template <typename SizeClassMapT> struct TestConfig2 {
-  static const bool MaySupportMemoryTagging = false;
-
-  struct Primary {
-    using SizeClassMap = SizeClassMapT;
-#if defined(__mips__)
-    // Unable to allocate a larger size under QEMU user-mode emulation.
-    static const scudo::uptr RegionSizeLog = 23U;
-#else
-    static const scudo::uptr RegionSizeLog = 24U;
-#endif
-    static const scudo::uptr GroupSizeLog = 20U;
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    typedef scudo::uptr CompactPtrT;
-    static const scudo::uptr CompactPtrScale = 0;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-  };
-};
-
-template <typename SizeClassMapT> struct TestConfig3 {
-  static const bool MaySupportMemoryTagging = true;
-
-  struct Primary {
-    using SizeClassMap = SizeClassMapT;
-#if defined(__mips__)
-    // Unable to allocate a larger size under QEMU user-mode emulation.
-    static const scudo::uptr RegionSizeLog = 23U;
-#else
-    static const scudo::uptr RegionSizeLog = 24U;
-#endif
-    static const scudo::uptr GroupSizeLog = 20U;
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    typedef scudo::uptr CompactPtrT;
-    static const scudo::uptr CompactPtrScale = 0;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-  };
-};
-
-template <typename SizeClassMapT> struct TestConfig4 {
-  static const bool MaySupportMemoryTagging = true;
-
-  struct Primary {
-    using SizeClassMap = SizeClassMapT;
-#if defined(__mips__)
-    // Unable to allocate a larger size under QEMU user-mode emulation.
-    static const scudo::uptr RegionSizeLog = 23U;
-#else
-    static const scudo::uptr RegionSizeLog = 24U;
-#endif
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    static const scudo::uptr CompactPtrScale = 3U;
-    static const scudo::uptr GroupSizeLog = 20U;
-    typedef scudo::u32 CompactPtrT;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-  };
-};
-
-// This is the only test config that enables the condition variable.
-template <typename SizeClassMapT> struct TestConfig5 {
-  static const bool MaySupportMemoryTagging = true;
-
-  struct Primary {
-    using SizeClassMap = SizeClassMapT;
-#if defined(__mips__)
-    // Unable to allocate a larger size under QEMU user-mode emulation.
-    static const scudo::uptr RegionSizeLog = 23U;
-#else
-    static const scudo::uptr RegionSizeLog = 24U;
-#endif
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-    static const scudo::uptr GroupSizeLog = 18U;
-    typedef scudo::u32 CompactPtrT;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-    static const bool UseConditionVariable = true;
-#if SCUDO_LINUX
-    using ConditionVariableT = scudo::ConditionVariableLinux;
-#else
-    using ConditionVariableT = scudo::ConditionVariableDummy;
-#endif
-  };
-};
-
-template <template <typename> class BaseConfig, typename SizeClassMapT>
-struct Config : public BaseConfig<SizeClassMapT> {};
-
-template <template <typename> class BaseConfig, typename SizeClassMapT>
-struct SizeClassAllocator
-    : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
-template <typename SizeClassMapT>
-struct SizeClassAllocator<TestConfig1, SizeClassMapT>
-    : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};
-
-template <template <typename> class BaseConfig, typename SizeClassMapT>
-struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
-  ~TestAllocator() {
-    this->verifyAllBlocksAreReleasedTestOnly();
-    this->unmapTestOnly();
-  }
-
-  void *operator new(size_t size) {
-    void *p = nullptr;
-    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
-    return p;
-  }
-
-  void operator delete(void *ptr) { free(ptr); }
-};
-
-template <template <typename> class BaseConfig>
-struct ScudoPrimaryTest : public Test {};
-
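-// gtest's TYPED_TEST cannot take a template template argument, so these
-// macros expand each (fixture, test, config) combination into a dedicated
-// TEST_F instead.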
-#if SCUDO_FUCHSIA
-#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
-#else
-#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)                            \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
-#endif
-
-#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
-  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
-  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }
-
-#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
-  template <template <typename> class TypeParam>                               \
-  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
-    void Run();                                                                \
-  };                                                                           \
-  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
-  template <template <typename> class TypeParam>                               \
-  void FIXTURE##NAME<TypeParam>::Run()
-
-SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
-  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
-  std::unique_ptr<Primary> Allocator(new Primary);
-  Allocator->init(/*ReleaseToOsInterval=*/-1);
-  typename Primary::CacheT Cache;
-  Cache.init(nullptr, Allocator.get());
-  const scudo::uptr NumberOfAllocations = 32U;
-  for (scudo::uptr I = 0; I <= 16U; I++) {
-    const scudo::uptr Size = 1UL << I;
-    if (!Primary::canAllocate(Size))
-      continue;
-    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
-    void *Pointers[NumberOfAllocations];
-    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
-      void *P = Cache.allocate(ClassId);
-      memset(P, 'B', Size);
-      Pointers[J] = P;
-    }
-    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
-      Cache.deallocate(ClassId, Pointers[J]);
-  }
-  Cache.destroy(nullptr);
-  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
-}
-
-struct SmallRegionsConfig {
-  static const bool MaySupportMemoryTagging = false;
-
-  struct Primary {
-    using SizeClassMap = scudo::DefaultSizeClassMap;
-    static const scudo::uptr RegionSizeLog = 21U;
-    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    typedef scudo::uptr CompactPtrT;
-    static const scudo::uptr CompactPtrScale = 0;
-    static const bool EnableRandomOffset = true;
-    static const scudo::uptr MapSizeIncrement = 1UL << 18;
-    static const scudo::uptr GroupSizeLog = 20U;
-  };
-};
-
-// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
-// For the 32-bit one, it requires actually exhausting memory, so we skip it.
-TEST(ScudoPrimaryTest, Primary64OOM) {
-  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
-  using TransferBatch = Primary::TransferBatchT;
-  Primary Allocator;
-  Allocator.init(/*ReleaseToOsInterval=*/-1);
-  typename Primary::CacheT Cache;
-  scudo::GlobalStats Stats;
-  Stats.init();
-  Cache.init(&Stats, &Allocator);
-  bool AllocationFailed = false;
-  std::vector<TransferBatch *> Batches;
-  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
-  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
-  typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];
-
-  for (scudo::uptr I = 0; I < 10000U; I++) {
-    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
-    if (!B) {
-      AllocationFailed = true;
-      break;
-    }
-    for (scudo::u16 J = 0; J < B->getCount(); J++)
-      memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
-    Batches.push_back(B);
-  }
-  while (!Batches.empty()) {
-    TransferBatch *B = Batches.back();
-    Batches.pop_back();
-    const scudo::u16 Count = B->getCount();
-    B->moveToArray(Blocks);
-    Allocator.pushBlocks(&Cache, ClassId, Blocks, Count);
-    Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
-  }
-  Cache.destroy(nullptr);
-  Allocator.releaseToOS(scudo::ReleaseToOS::Force);
-  scudo::ScopedString Str;
-  Allocator.getStats(&Str);
-  Str.output();
-  EXPECT_EQ(AllocationFailed, true);
-  Allocator.unmapTestOnly();
-}
-
-SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
-  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
-  std::unique_ptr<Primary> Allocator(new Primary);
-  Allocator->init(/*ReleaseToOsInterval=*/-1);
-  typename Primary::CacheT Cache;
-  Cache.init(nullptr, Allocator.get());
-  std::vector<std::pair<scudo::uptr, void *>> V;
-  for (scudo::uptr I = 0; I < 64U; I++) {
-    const scudo::uptr Size =
-        static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
-    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
-    void *P = Cache.allocate(ClassId);
-    V.push_back(std::make_pair(ClassId, P));
-  }
-  scudo::uptr Found = 0;
-  auto Lambda = [&V, &Found](scudo::uptr Block) {
-    for (const auto &Pair : V) {
-      if (Pair.second == reinterpret_cast<void *>(Block))
-        Found++;
-    }
-  };
-  Allocator->disable();
-  Allocator->iterateOverBlocks(Lambda);
-  Allocator->enable();
-  EXPECT_EQ(Found, V.size());
-  while (!V.empty()) {
-    auto Pair = V.back();
-    Cache.deallocate(Pair.first, Pair.second);
-    V.pop_back();
-  }
-  Cache.destroy(nullptr);
-  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
-}
-
-SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
-  using Primary = TestAllocator<TypeParam, scudo::Config::Primary::SizeClassMap>;
-  std::unique_ptr<Primary> Allocator(new Primary);
-  Allocator->init(/*ReleaseToOsInterval=*/-1);
-  std::mutex Mutex;
-  std::condition_variable Cv;
-  bool Ready = false;
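-  // Start gate: the workers block on Cv until Ready flips, so that all
-  // threads hit the allocator concurrently.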
-  std::thread Threads[32];
-  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) {
-    Threads[I] = std::thread([&]() {
-      static thread_local typename Primary::CacheT Cache;
-      Cache.init(nullptr, Allocator.get());
-      std::vector<std::pair<scudo::uptr, void *>> V;
-      {
-        std::unique_lock<std::mutex> Lock(Mutex);
-        while (!Ready)
-          Cv.wait(Lock);
-      }
-      for (scudo::uptr I = 0; I < 256U; I++) {
-        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
-                                 Primary::SizeClassMap::MaxSize / 4;
-        const scudo::uptr ClassId =
-            Primary::SizeClassMap::getClassIdBySize(Size);
-        void *P = Cache.allocate(ClassId);
-        if (P)
-          V.push_back(std::make_pair(ClassId, P));
-      }
-
-      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
-      Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-
-      while (!V.empty()) {
-        auto Pair = V.back();
-        Cache.deallocate(Pair.first, Pair.second);
-        V.pop_back();
-        // This increases the chance of having non-full TransferBatches and
-        // exercises the code path that merges TransferBatches.
-        if (std::rand() % 8 == 0)
-          Cache.drain();
-      }
-      Cache.destroy(nullptr);
-    });
-  }
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Allocator->getFragmentationInfo(&Str);
-  Str.output();
-}
-
-// Through a simple allocation that spans two pages, verify that releaseToOS
-// actually releases some bytes (at least one page's worth). This is a
-// regression test for an error in how the release criteria were computed.
-SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
-  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
-  std::unique_ptr<Primary> Allocator(new Primary);
-  Allocator->init(/*ReleaseToOsInterval=*/-1);
-  typename Primary::CacheT Cache;
-  Cache.init(nullptr, Allocator.get());
-  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
-  EXPECT_TRUE(Primary::canAllocate(Size));
-  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
-  void *P = Cache.allocate(ClassId);
-  EXPECT_NE(P, nullptr);
-  Cache.deallocate(ClassId, P);
-  Cache.destroy(nullptr);
-  EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll), 0U);
-}
-
-SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
-  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
-  std::unique_ptr<Primary> Allocator(new Primary);
-  Allocator->init(/*ReleaseToOsInterval=*/-1);
-  typename Primary::CacheT Cache;
-  Cache.init(nullptr, Allocator.get());
-  const scudo::uptr Size = 32U;
-  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
-
-  // Allocate 4 times the group size worth of memory and then release all of
-  // it; the freed blocks should get classified into groups. Afterwards,
-  // allocate one group size worth of memory and expect the maximum address
-  // difference between the returned blocks to be at most 2 times the group
-  // size. A single group's range isn't guaranteed because the group id is
-  // obtained by shifting the compact pointer, which, depending on the
-  // configuration, may not be aligned to the group size. As a result, the
-  // blocks can span at most two groups.
-  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
-  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
-  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
-  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
-  std::vector<scudo::uptr> Blocks;
-  std::mt19937 R;
-
-  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
-    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));
-
-  std::shuffle(Blocks.begin(), Blocks.end(), R);
-
-  // Release all the allocated blocks, including those held by local cache.
-  while (!Blocks.empty()) {
-    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
-    Blocks.pop_back();
-  }
-  Cache.drain();
-
-  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
-    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));
-
-  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
-                *std::min_element(Blocks.begin(), Blocks.end()),
-            GroupSizeMem * 2);
-
-  while (!Blocks.empty()) {
-    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
-    Blocks.pop_back();
-  }
-  Cache.drain();
-}
diff --git a/Telegram/ThirdParty/scudo/tests/quarantine_test.cpp b/Telegram/ThirdParty/scudo/tests/quarantine_test.cpp
deleted file mode 100644
index 972c98d51..000000000
--- a/Telegram/ThirdParty/scudo/tests/quarantine_test.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-//===-- quarantine_test.cpp -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "quarantine.h"
-
-#include <pthread.h>
-#include <stdlib.h>
-
-static void *FakePtr = reinterpret_cast<void *>(0xFA83FA83);
-static const scudo::uptr BlockSize = 8UL;
-static const scudo::uptr LargeBlockSize = 16384UL;
-
-struct QuarantineCallback {
-  void recycle(void *P) { EXPECT_EQ(P, FakePtr); }
-  void *allocate(scudo::uptr Size) { return malloc(Size); }
-  void deallocate(void *P) { free(P); }
-};
-
-typedef scudo::GlobalQuarantine<QuarantineCallback, void> QuarantineT;
-typedef typename QuarantineT::CacheT CacheT;
-
-static QuarantineCallback Cb;
-
-static void deallocateCache(CacheT *Cache) {
-  while (scudo::QuarantineBatch *Batch = Cache->dequeueBatch())
-    Cb.deallocate(Batch);
-}
-
-TEST(ScudoQuarantineTest, QuarantineBatchMerge) {
-  // Verify the trivial case.
-  scudo::QuarantineBatch Into;
-  Into.init(FakePtr, 4UL);
-  scudo::QuarantineBatch From;
-  From.init(FakePtr, 8UL);
-
-  Into.merge(&From);
-
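-  // A batch's Size accounts for the quarantined bytes plus the batch header
-  // itself; getQuarantinedSize() excludes the header.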
-  EXPECT_EQ(Into.Count, 2UL);
-  EXPECT_EQ(Into.Batch[0], FakePtr);
-  EXPECT_EQ(Into.Batch[1], FakePtr);
-  EXPECT_EQ(Into.Size, 12UL + sizeof(scudo::QuarantineBatch));
-  EXPECT_EQ(Into.getQuarantinedSize(), 12UL);
-
-  EXPECT_EQ(From.Count, 0UL);
-  EXPECT_EQ(From.Size, sizeof(scudo::QuarantineBatch));
-  EXPECT_EQ(From.getQuarantinedSize(), 0UL);
-
-  // Fill up to the limit, then merge the batches.
-  for (scudo::uptr I = 2; I < scudo::QuarantineBatch::MaxCount; ++I)
-    From.push_back(FakePtr, 8UL);
-  EXPECT_TRUE(Into.Count + From.Count == scudo::QuarantineBatch::MaxCount);
-  EXPECT_TRUE(Into.canMerge(&From));
-
-  Into.merge(&From);
-  EXPECT_TRUE(Into.Count == scudo::QuarantineBatch::MaxCount);
-
-  // No more space, not even for one element.
-  From.init(FakePtr, 8UL);
-
-  EXPECT_FALSE(Into.canMerge(&From));
-}
-
-TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesEmpty) {
-  CacheT Cache;
-  CacheT ToDeallocate;
-  Cache.init();
-  ToDeallocate.init();
-  Cache.mergeBatches(&ToDeallocate);
-
-  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
-  EXPECT_EQ(ToDeallocate.dequeueBatch(), nullptr);
-}
-
-TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesOneBatch) {
-  CacheT Cache;
-  Cache.init();
-  Cache.enqueue(Cb, FakePtr, BlockSize);
-  EXPECT_EQ(BlockSize + sizeof(scudo::QuarantineBatch), Cache.getSize());
-
-  CacheT ToDeallocate;
-  ToDeallocate.init();
-  Cache.mergeBatches(&ToDeallocate);
-
-  // Nothing to merge, nothing to deallocate.
-  EXPECT_EQ(BlockSize + sizeof(scudo::QuarantineBatch), Cache.getSize());
-
-  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
-  EXPECT_EQ(ToDeallocate.dequeueBatch(), nullptr);
-
-  deallocateCache(&Cache);
-}
-
-TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesSmallBatches) {
-  // Make a Cache with two batches small enough to merge.
-  CacheT From;
-  From.init();
-  From.enqueue(Cb, FakePtr, BlockSize);
-  CacheT Cache;
-  Cache.init();
-  Cache.enqueue(Cb, FakePtr, BlockSize);
-
-  Cache.transfer(&From);
-  EXPECT_EQ(BlockSize * 2 + sizeof(scudo::QuarantineBatch) * 2,
-            Cache.getSize());
-
-  CacheT ToDeallocate;
-  ToDeallocate.init();
-  Cache.mergeBatches(&ToDeallocate);
-
-  // Batches merged, one batch to deallocate.
-  EXPECT_EQ(BlockSize * 2 + sizeof(scudo::QuarantineBatch), Cache.getSize());
-  EXPECT_EQ(ToDeallocate.getSize(), sizeof(scudo::QuarantineBatch));
-
-  deallocateCache(&Cache);
-  deallocateCache(&ToDeallocate);
-}
-
-TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesTooBigToMerge) {
-  const scudo::uptr NumBlocks = scudo::QuarantineBatch::MaxCount - 1;
-
-  // Make a Cache with two batches too large to merge.
-  CacheT From;
-  CacheT Cache;
-  From.init();
-  Cache.init();
-  for (scudo::uptr I = 0; I < NumBlocks; ++I) {
-    From.enqueue(Cb, FakePtr, BlockSize);
-    Cache.enqueue(Cb, FakePtr, BlockSize);
-  }
-  Cache.transfer(&From);
-  EXPECT_EQ(BlockSize * NumBlocks * 2 + sizeof(scudo::QuarantineBatch) * 2,
-            Cache.getSize());
-
-  CacheT ToDeallocate;
-  ToDeallocate.init();
-  Cache.mergeBatches(&ToDeallocate);
-
-  // Batches cannot be merged.
-  EXPECT_EQ(BlockSize * NumBlocks * 2 + sizeof(scudo::QuarantineBatch) * 2,
-            Cache.getSize());
-  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
-
-  deallocateCache(&Cache);
-}
-
-TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesALotOfBatches) {
-  const scudo::uptr NumBatchesAfterMerge = 3;
-  const scudo::uptr NumBlocks =
-      scudo::QuarantineBatch::MaxCount * NumBatchesAfterMerge;
-  const scudo::uptr NumBatchesBeforeMerge = NumBlocks;
-
-  // Make a Cache with many small batches.
-  CacheT Cache;
-  Cache.init();
-  for (scudo::uptr I = 0; I < NumBlocks; ++I) {
-    CacheT From;
-    From.init();
-    From.enqueue(Cb, FakePtr, BlockSize);
-    Cache.transfer(&From);
-  }
-
-  EXPECT_EQ(BlockSize * NumBlocks +
-                sizeof(scudo::QuarantineBatch) * NumBatchesBeforeMerge,
-            Cache.getSize());
-
-  CacheT ToDeallocate;
-  ToDeallocate.init();
-  Cache.mergeBatches(&ToDeallocate);
-
-  // All blocks should fit into 3 batches.
-  EXPECT_EQ(BlockSize * NumBlocks +
-                sizeof(scudo::QuarantineBatch) * NumBatchesAfterMerge,
-            Cache.getSize());
-
-  EXPECT_EQ(ToDeallocate.getSize(),
-            sizeof(scudo::QuarantineBatch) *
-                (NumBatchesBeforeMerge - NumBatchesAfterMerge));
-
-  deallocateCache(&Cache);
-  deallocateCache(&ToDeallocate);
-}
-
-static const scudo::uptr MaxQuarantineSize = 1024UL << 10; // 1MB
-static const scudo::uptr MaxCacheSize = 256UL << 10;       // 256KB
-
-TEST(ScudoQuarantineTest, GlobalQuarantine) {
-  QuarantineT Quarantine;
-  CacheT Cache;
-  Cache.init();
-  Quarantine.init(MaxQuarantineSize, MaxCacheSize);
-  EXPECT_EQ(Quarantine.getMaxSize(), MaxQuarantineSize);
-  EXPECT_EQ(Quarantine.getCacheSize(), MaxCacheSize);
-
-  bool DrainOccurred = false;
-  scudo::uptr CacheSize = Cache.getSize();
-  EXPECT_EQ(Cache.getSize(), 0UL);
-  // We quarantine enough blocks that a drain has to occur. Verify this by
-  // watching for a decrease in the cache size.
-  for (scudo::uptr I = 0; I < 128UL; I++) {
-    Quarantine.put(&Cache, Cb, FakePtr, LargeBlockSize);
-    if (!DrainOccurred && Cache.getSize() < CacheSize)
-      DrainOccurred = true;
-    CacheSize = Cache.getSize();
-  }
-  EXPECT_TRUE(DrainOccurred);
-
-  Quarantine.drainAndRecycle(&Cache, Cb);
-  EXPECT_EQ(Cache.getSize(), 0UL);
-
-  scudo::ScopedString Str;
-  Quarantine.getStats(&Str);
-  Str.output();
-}
-
-struct PopulateQuarantineThread {
-  pthread_t Thread;
-  QuarantineT *Quarantine;
-  CacheT Cache;
-};
-
-void *populateQuarantine(void *Param) {
-  PopulateQuarantineThread *P = static_cast<PopulateQuarantineThread *>(Param);
-  P->Cache.init();
-  for (scudo::uptr I = 0; I < 128UL; I++)
-    P->Quarantine->put(&P->Cache, Cb, FakePtr, LargeBlockSize);
-  return 0;
-}
-
-TEST(ScudoQuarantineTest, ThreadedGlobalQuarantine) {
-  QuarantineT Quarantine;
-  Quarantine.init(MaxQuarantineSize, MaxCacheSize);
-
-  const scudo::uptr NumberOfThreads = 32U;
-  PopulateQuarantineThread T[NumberOfThreads];
-  for (scudo::uptr I = 0; I < NumberOfThreads; I++) {
-    T[I].Quarantine = &Quarantine;
-    pthread_create(&T[I].Thread, 0, populateQuarantine, &T[I]);
-  }
-  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
-    pthread_join(T[I].Thread, 0);
-
-  scudo::ScopedString Str;
-  Quarantine.getStats(&Str);
-  Str.output();
-
-  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
-    Quarantine.drainAndRecycle(&T[I].Cache, Cb);
-}
diff --git a/Telegram/ThirdParty/scudo/tests/release_test.cpp b/Telegram/ThirdParty/scudo/tests/release_test.cpp
deleted file mode 100644
index 14b398a91..000000000
--- a/Telegram/ThirdParty/scudo/tests/release_test.cpp
+++ /dev/null
@@ -1,654 +0,0 @@
-//===-- release_test.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "list.h"
-#include "release.h"
-#include "size_class_map.h"
-
-#include <string.h>
-
-#include <algorithm>
-#include <random>
-#include <set>
-
-TEST(ScudoReleaseTest, RegionPageMap) {
-  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
-    // Various valid counter max values packed into one word.
-    scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
-    ASSERT_TRUE(PageMap2N.isAllocated());
-    EXPECT_EQ(1U, PageMap2N.getBufferNumElements());
-    // Check the "all bit set" values too.
-    scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
-    ASSERT_TRUE(PageMap2N1_1.isAllocated());
-    EXPECT_EQ(1U, PageMap2N1_1.getBufferNumElements());
-    // Verify the packing ratio; each counter is expected to be packed into
-    // the closest power-of-2 number of bits.
-    scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
-    ASSERT_TRUE(PageMap.isAllocated());
-    EXPECT_EQ(scudo::roundUpPowerOfTwo(I + 1), PageMap.getBufferNumElements());
-  }
-
-  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
-  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
-    // Make sure counters request one memory page for the buffer.
-    const scudo::uptr NumCounters =
-        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
-    scudo::RegionPageMap PageMap(1U, NumCounters,
-                                       1UL << ((1UL << I) - 1));
-    ASSERT_TRUE(PageMap.isAllocated());
-    PageMap.inc(0U, 0U);
-    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
-      EXPECT_EQ(0UL, PageMap.get(0U, C));
-      PageMap.inc(0U, C);
-      EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
-    }
-    EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
-    PageMap.inc(0U, NumCounters - 1);
-    if (I > 0) {
-      PageMap.incRange(0u, 0U, NumCounters - 1);
-      for (scudo::uptr C = 0; C < NumCounters; C++)
-        EXPECT_EQ(2UL, PageMap.get(0U, C));
-    }
-  }
-
-  // Similar to the above except that we are using incN().
-  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
-    // Make sure counters request one memory page for the buffer.
-    const scudo::uptr NumCounters =
-        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
-    scudo::uptr MaxValue = 1UL << ((1UL << I) - 1);
-    if (MaxValue <= 1U)
-      continue;
-
-    scudo::RegionPageMap PageMap(1U, NumCounters, MaxValue);
-
-    scudo::uptr N = MaxValue / 2;
-    PageMap.incN(0U, 0, N);
-    for (scudo::uptr C = 1; C < NumCounters; C++) {
-      EXPECT_EQ(0UL, PageMap.get(0U, C));
-      PageMap.incN(0U, C, N);
-      EXPECT_EQ(N, PageMap.get(0U, C - 1));
-    }
-    EXPECT_EQ(N, PageMap.get(0U, NumCounters - 1));
-  }
-}
-
-class StringRangeRecorder {
-public:
-  std::string ReportedPages;
-
-  StringRangeRecorder()
-      : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}
-
-  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
-    From >>= PageSizeScaledLog;
-    To >>= PageSizeScaledLog;
-    EXPECT_LT(From, To);
-    if (!ReportedPages.empty())
-      EXPECT_LT(LastPageReported, From);
-    ReportedPages.append(From - LastPageReported, '.');
-    ReportedPages.append(To - From, 'x');
-    LastPageReported = To;
-  }
-
-private:
-  const scudo::uptr PageSizeScaledLog;
-  scudo::uptr LastPageReported = 0;
-};
-
-TEST(ScudoReleaseTest, FreePagesRangeTracker) {
-  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
-  const char *TestCases[] = {
-      "",
-      ".",
-      "x",
-      "........",
-      "xxxxxxxxxxx",
-      "..............xxxxx",
-      "xxxxxxxxxxxxxxxxxx.....",
-      "......xxxxxxxx........",
-      "xxx..........xxxxxxxxxxxxxxx",
-      "......xxxx....xxxx........",
-      "xxx..........xxxxxxxx....xxxxxxx",
-      "x.x.x.x.x.x.x.x.x.x.x.x.",
-      ".x.x.x.x.x.x.x.x.x.x.x.x",
-      ".x.x.x.x.x.x.x.x.x.x.x.x.",
-      "x.x.x.x.x.x.x.x.x.x.x.x.x",
-  };
-  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;
-
-  for (auto TestCase : TestCases) {
-    StringRangeRecorder Recorder;
-    RangeTracker Tracker(Recorder);
-    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
-      Tracker.processNextPage(TestCase[I] == 'x');
-    Tracker.finish();
-    // Strip trailing '.'-pages before comparing the results, as they are
-    // never reported to the range recorder anyway.
-    const char *LastX = strrchr(TestCase, 'x');
-    std::string Expected(
-        TestCase,
-        LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
-    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
-  }
-}
-
-class ReleasedPagesRecorder {
-public:
-  ReleasedPagesRecorder() = default;
-  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
-  std::set<scudo::uptr> ReportedPages;
-
-  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
-    const scudo::uptr PageSize = scudo::getPageSizeCached();
-    for (scudo::uptr I = From; I < To; I += PageSize)
-      ReportedPages.insert(I + getBase());
-  }
-
-  scudo::uptr getBase() const { return Base; }
-  scudo::uptr Base = 0;
-};
-
-// Simplified version of a TransferBatch.
-template <class SizeClassMap> struct FreeBatch {
-  static const scudo::u16 MaxCount = SizeClassMap::MaxNumCachedHint;
-  void clear() { Count = 0; }
-  void add(scudo::uptr P) {
-    DCHECK_LT(Count, MaxCount);
-    Batch[Count++] = P;
-  }
-  scudo::u16 getCount() const { return Count; }
-  scudo::uptr get(scudo::u16 I) const {
-    DCHECK_LE(I, Count);
-    return Batch[I];
-  }
-  FreeBatch *Next;
-
-private:
-  scudo::uptr Batch[MaxCount];
-  scudo::u16 Count;
-};
-
-template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
-  typedef FreeBatch<SizeClassMap> Batch;
-  const scudo::uptr PagesCount = 1024;
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
-  std::mt19937 R;
-  scudo::u32 RandState = 42;
-
-  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
-    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
-    const scudo::uptr MaxBlocks = PagesCount * PageSize / BlockSize;
-
-    // Generate the random free list.
-    std::vector<scudo::uptr> FreeArray;
-    bool InFreeRange = false;
-    scudo::uptr CurrentRangeEnd = 0;
-    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
-      if (I == CurrentRangeEnd) {
-        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
-        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
-      }
-      if (InFreeRange)
-        FreeArray.push_back(I * BlockSize);
-    }
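-    // FreeArray now holds the offsets of the free blocks, generated as
-    // alternating used/free runs of 1 to 128 blocks each.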
-    if (FreeArray.empty())
-      continue;
-    // Shuffle the array to ensure that the order is irrelevant.
-    std::shuffle(FreeArray.begin(), FreeArray.end(), R);
-
-    // Build the FreeList from the FreeArray.
-    scudo::SinglyLinkedList<Batch> FreeList;
-    FreeList.clear();
-    Batch *CurrentBatch = nullptr;
-    for (auto const &Block : FreeArray) {
-      if (!CurrentBatch) {
-        CurrentBatch = new Batch;
-        CurrentBatch->clear();
-        FreeList.push_back(CurrentBatch);
-      }
-      CurrentBatch->add(Block);
-      if (CurrentBatch->getCount() == Batch::MaxCount)
-        CurrentBatch = nullptr;
-    }
-
-    // Release the memory.
-    auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
-    auto DecompactPtr = [](scudo::uptr P) { return P; };
-    ReleasedPagesRecorder Recorder;
-    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
-    ASSERT_FALSE(Context.hasBlockMarked());
-    Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
-                                   /*RegionIndex=*/0, MaxBlocks * BlockSize,
-                                   /*MayContainLastBlockInRegion=*/true);
-    ASSERT_TRUE(Context.hasBlockMarked());
-    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-    scudo::RegionPageMap &PageMap = Context.PageMap;
-
-    // Verify that no released page is touched by a used chunk, and that every
-    // range of free chunks big enough to contain entire memory pages had
-    // those pages released.
-    scudo::uptr VerifiedReleasedPages = 0;
-    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());
-
-    scudo::uptr CurrentBlock = 0;
-    InFreeRange = false;
-    scudo::uptr CurrentFreeRangeStart = 0;
-    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
-      const bool IsFreeBlock =
-          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
-      if (IsFreeBlock) {
-        if (!InFreeRange) {
-          InFreeRange = true;
-          CurrentFreeRangeStart = CurrentBlock;
-        }
-      } else {
-        // Verify that this used chunk does not touch any released page.
-        const scudo::uptr StartPage = CurrentBlock / PageSize;
-        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
-        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
-          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
-                                    Recorder.ReportedPages.end();
-          EXPECT_EQ(false, PageReleased);
-          EXPECT_EQ(false,
-                    PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
-        }
-
-        if (InFreeRange) {
-          InFreeRange = false;
-          // Verify that all entire memory pages covered by this range of free
-          // chunks were released.
-          scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
-          while (P + PageSize <= CurrentBlock) {
-            const bool PageReleased =
-                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
-            EXPECT_EQ(true, PageReleased);
-            EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
-            VerifiedReleasedPages++;
-            P += PageSize;
-          }
-        }
-      }
-
-      CurrentBlock += BlockSize;
-    }
-
-    if (InFreeRange) {
-      scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
-      const scudo::uptr EndPage =
-          scudo::roundUp(MaxBlocks * BlockSize, PageSize);
-      while (P + PageSize <= EndPage) {
-        const bool PageReleased =
-            Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
-        EXPECT_EQ(true, PageReleased);
-        EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
-        VerifiedReleasedPages++;
-        P += PageSize;
-      }
-    }
-
-    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);
-
-    while (!FreeList.empty()) {
-      CurrentBatch = FreeList.front();
-      FreeList.pop_front();
-      delete CurrentBatch;
-    }
-  }
-}
-
-template <class SizeClassMap> void testPageMapMarkRange() {
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-
-  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
-    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
-
-    const scudo::uptr GroupNum = 2;
-    const scudo::uptr GroupSize = scudo::roundUp(BlockSize, PageSize) * 2;
-    const scudo::uptr RegionSize =
-        scudo::roundUpSlow(GroupSize * GroupNum, BlockSize);
-    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
-
-    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) {
-      for (scudo::uptr Page = Block / PageSize;
-           Page <= (Block + BlockSize - 1) / PageSize &&
-           Page < RoundedRegionSize / PageSize;
-           ++Page) {
-        ASSERT_LT(Page, Pages.size());
-        ++Pages[Page];
-      }
-    }
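-    // Pages[P] is now the number of blocks overlapping page P; the page-map
-    // counters are checked against these reference counts below.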
-
-    for (scudo::uptr GroupId = 0; GroupId < GroupNum; ++GroupId) {
-      const scudo::uptr GroupBeg = GroupId * GroupSize;
-      const scudo::uptr GroupEnd = GroupBeg + GroupSize;
-
-      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                                        /*ReleaseSize=*/RegionSize);
-      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U,
-                                    /*RegionIndex=*/0, RegionSize);
-
-      scudo::uptr FirstBlock =
-          ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;
-
-      // None of the pages before the first block's page are supposed to be
-      // marked.
-      if (FirstBlock / PageSize > 0) {
-        for (scudo::uptr Page = 0; Page <= FirstBlock / PageSize - 1; ++Page)
-          EXPECT_EQ(Context.PageMap.get(/*Region=*/0, Page), 0U);
-      }
-
-      // Verify the pages used by the blocks in the group, except that if the
-      // end of the last block is not aligned with `GroupEnd`, that block is
-      // verified later.
-      scudo::uptr Block;
-      for (Block = FirstBlock; Block + BlockSize <= GroupEnd;
-           Block += BlockSize) {
-        for (scudo::uptr Page = Block / PageSize;
-             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-          // The first used page in the group has two cases: with and without
-          // a block sitting across the group boundary.
-          if (Page == FirstBlock / PageSize) {
-            if (FirstBlock % PageSize == 0) {
-              EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0U, Page));
-            } else {
-              // A block straddles `GroupBeg`; it only increments the counter,
-              // so the count is expected to be 1 less than the total number
-              // of blocks on the page (excluding the straddling block).
-              EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page),
-                        Pages[Page] - 1);
-            }
-          } else {
-            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
-          }
-        }
-      }
-
-      if (Block == GroupEnd)
-        continue;
-
-      // Examine the last block which sits across the group boundary.
-      if (Block + BlockSize == RegionSize) {
-        // This is the last block in the region; it is supposed to mark all of
-        // its pages as all-counted.
-        for (scudo::uptr Page = Block / PageSize;
-             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
-        }
-      } else {
-        for (scudo::uptr Page = Block / PageSize;
-             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-          if (Page <= (GroupEnd - 1) / PageSize)
-            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
-          else
-            EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 1U);
-        }
-      }
-
-      const scudo::uptr FirstUncountedPage =
-          scudo::roundUp(Block + BlockSize, PageSize);
-      for (scudo::uptr Page = FirstUncountedPage;
-           Page <= RoundedRegionSize / PageSize; ++Page) {
-        EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 0U);
-      }
-    } // Iterate each Group
-
-    // Release the entire region. This is to ensure the last page is counted.
-    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                                      /*ReleaseSize=*/RegionSize);
-    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0,
-                                  /*RegionIndex=*/0, RegionSize);
-    for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
-      EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
-  } // Iterate each size class
-}
-
-template <class SizeClassMap> void testReleasePartialRegion() {
-  typedef FreeBatch<SizeClassMap> Batch;
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-
-  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
-    // In the following, we want to ensure the region includes at least 2
-    // pages, and we release all of the pages except the first one. The
-    // handling of the last block is tricky, so we always test the case that
-    // includes the last block.
-    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
-    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
-    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
-    const scudo::uptr RegionSize =
-        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
-                           BlockSize) +
-        BlockSize;
-    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
-
-    scudo::SinglyLinkedList<Batch> FreeList;
-    FreeList.clear();
-
-    // Skip the blocks in the first page and add the remaining ones.
-    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
-         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
-      for (scudo::uptr Page = Block / PageSize;
-           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-        ASSERT_LT(Page, Pages.size());
-        ++Pages[Page];
-      }
-    }
-
-    // This follows the logic of how we count the last page. It should be
-    // consistent with how markFreeBlocksInRegion() handles the last block.
-    if (RoundedRegionSize % BlockSize != 0)
-      ++Pages.back();
-
-    Batch *CurrentBatch = nullptr;
-    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
-         Block < RegionSize; Block += BlockSize) {
-      if (CurrentBatch == nullptr ||
-          CurrentBatch->getCount() == Batch::MaxCount) {
-        CurrentBatch = new Batch;
-        CurrentBatch->clear();
-        FreeList.push_back(CurrentBatch);
-      }
-      CurrentBatch->add(Block);
-    }
-
-    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
-      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
-      ReleasedPagesRecorder Recorder(ReleaseBase);
-      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-      const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);
-
-      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
-        if (P < FirstBlock) {
-          // If FirstBlock is not aligned on a page boundary, the first
-          // touched page will not be released either.
-          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
-                      Recorder.ReportedPages.end());
-        } else {
-          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
-                      Recorder.ReportedPages.end());
-        }
-      }
-    };
-
-    // Test marking by visiting each block.
-    {
-      auto DecompactPtr = [](scudo::uptr P) { return P; };
-      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                                        /*ReleaseSize=*/RegionSize - PageSize,
-                                        ReleaseBase);
-      Context.markFreeBlocksInRegion(FreeList, DecompactPtr, /*Base=*/0U,
-                                     /*RegionIndex=*/0, RegionSize,
-                                     /*MayContainLastBlockInRegion=*/true);
-      for (const Batch &It : FreeList) {
-        for (scudo::u16 I = 0; I < It.getCount(); I++) {
-          scudo::uptr Block = It.get(I);
-          for (scudo::uptr Page = Block / PageSize;
-               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
-                                                       Page - BasePageOffset));
-          }
-        }
-      }
-
-      VerifyReleaseToOs(Context);
-    }
-
-    // Test range marking.
-    {
-      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                                        /*ReleaseSize=*/RegionSize - PageSize,
-                                        ReleaseBase);
-      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U,
-                                    /*RegionIndex=*/0, RegionSize);
-      for (scudo::uptr Page = ReleaseBase / PageSize;
-           Page < RoundedRegionSize / PageSize; ++Page) {
-        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
-            Pages[Page]) {
-          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
-                                                   Page - BasePageOffset));
-        }
-      }
-
-      VerifyReleaseToOs(Context);
-    }
-
-    // Check the buffer size of PageMap.
-    {
-      scudo::PageReleaseContext Full(BlockSize, /*NumberOfRegions=*/1U,
-                                     /*ReleaseSize=*/RegionSize);
-      Full.ensurePageMapAllocated();
-      scudo::PageReleaseContext Partial(BlockSize, /*NumberOfRegions=*/1U,
-                                        /*ReleaseSize=*/RegionSize - PageSize,
-                                        ReleaseBase);
-      Partial.ensurePageMapAllocated();
-
-      EXPECT_GE(Full.PageMap.getBufferNumElements(),
-                Partial.PageMap.getBufferNumElements());
-    }
-
-    while (!FreeList.empty()) {
-      CurrentBatch = FreeList.front();
-      FreeList.pop_front();
-      delete CurrentBatch;
-    }
-  } // Iterate each size class
-}
-
-TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
-  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
-}
-
-TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
-  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
-}
-
-TEST(ScudoReleaseTest, PageMapMarkRange) {
-  testPageMapMarkRange<scudo::DefaultSizeClassMap>();
-  testPageMapMarkRange<scudo::AndroidSizeClassMap>();
-  testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
-}
-
-TEST(ScudoReleaseTest, ReleasePartialRegion) {
-  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
-  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
-  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
-}
-
-template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-
-  // We want to test that a memory group containing only a single block is
-  // handled properly. The case looks like:
-  //
-  //   From                     To
-  //     +----------------------+
-  //  +------------+------------+
-  //  |            |            |
-  //  +------------+------------+
-  //                            ^
-  //                        RegionSize
-  //
-  // Note that `From` will be page aligned.
-  //
-  // If the second-to-last block is aligned at `From`, then we expect all the
-  // pages after `From` to be marked as can-be-released. Otherwise, only the
-  // pages touched exclusively by the last block will be marked as
-  // can-be-released.
-  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
-    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
-    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
-    const scudo::uptr To =
-        From % BlockSize == 0
-            ? From + BlockSize
-            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
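-    // `To` is the end of the first block starting at or after `From`, so the
-    // range [From, To) contains exactly one complete block.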
-    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);
-
-    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
-         Block += BlockSize) {
-      for (scudo::uptr Page = Block / PageSize;
-           Page <= (Block + BlockSize - 1) / PageSize &&
-           Page < RoundedRegionSize / PageSize;
-           ++Page) {
-        ASSERT_LT(Page, Pages.size());
-        ++Pages[Page];
-      }
-    }
-
-    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
-                                      /*ReleaseSize=*/To,
-                                      /*ReleaseBase=*/0U);
-    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
-                                  /*RegionSize=*/To);
-
-    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
-      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
-          Pages[Page / PageSize]) {
-        EXPECT_TRUE(
-            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
-      }
-    }
-  } // for each size class
-}
-
-TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
-  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
-  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
-  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
-}
-
-TEST(ScudoReleaseTest, BufferPool) {
-  constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
-  constexpr scudo::uptr StaticBufferNumElements = 512U;
-
-  // Allocate the buffer pool on the heap because it is quite large (slightly
-  // more than StaticBufferCount * StaticBufferNumElements * sizeof(uptr)) and
-  // it may not fit on the stack on some platforms.
-  using BufferPool =
-      scudo::BufferPool<StaticBufferCount, StaticBufferNumElements>;
-  std::unique_ptr<BufferPool> Pool(new BufferPool());
-
-  std::vector<BufferPool::Buffer> Buffers;
-  for (scudo::uptr I = 0; I < StaticBufferCount; ++I) {
-    BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
-    EXPECT_TRUE(Pool->isStaticBufferTestOnly(Buffer));
-    Buffers.push_back(Buffer);
-  }
-
-  // The static buffers are supposed to be used up by now.
-  BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
-  EXPECT_FALSE(Pool->isStaticBufferTestOnly(Buffer));
-
-  Pool->releaseBuffer(Buffer);
-  for (auto &Buffer : Buffers)
-    Pool->releaseBuffer(Buffer);
-}
diff --git a/Telegram/ThirdParty/scudo/tests/report_test.cpp b/Telegram/ThirdParty/scudo/tests/report_test.cpp
deleted file mode 100644
index 92f1ee813..000000000
--- a/Telegram/ThirdParty/scudo/tests/report_test.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-//===-- report_test.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "report.h"
-
-TEST(ScudoReportDeathTest, Check) {
-  CHECK_LT(-1, 1);
-  EXPECT_DEATH(CHECK_GT(-1, 1),
-               "\\(-1\\) > \\(1\\) \\(\\(u64\\)op1=18446744073709551615, "
-               "\\(u64\\)op2=1");
-}
-
-TEST(ScudoReportDeathTest, Generic) {
-  // Potentially unused if EXPECT_DEATH isn't defined.
-  UNUSED void *P = reinterpret_cast<void *>(0x42424242U);
-  EXPECT_DEATH(scudo::reportError("TEST123"), "Scudo ERROR.*TEST123");
-  EXPECT_DEATH(scudo::reportInvalidFlag("ABC", "DEF"), "Scudo ERROR.*ABC.*DEF");
-  EXPECT_DEATH(scudo::reportHeaderCorruption(P), "Scudo ERROR.*42424242");
-  EXPECT_DEATH(scudo::reportSanityCheckError("XYZ"), "Scudo ERROR.*XYZ");
-  EXPECT_DEATH(scudo::reportAlignmentTooBig(123, 456), "Scudo ERROR.*123.*456");
-  EXPECT_DEATH(scudo::reportAllocationSizeTooBig(123, 456, 789),
-               "Scudo ERROR.*123.*456.*789");
-  EXPECT_DEATH(scudo::reportOutOfMemory(4242), "Scudo ERROR.*4242");
-  EXPECT_DEATH(
-      scudo::reportInvalidChunkState(scudo::AllocatorAction::Recycling, P),
-      "Scudo ERROR.*recycling.*42424242");
-  EXPECT_DEATH(
-      scudo::reportInvalidChunkState(scudo::AllocatorAction::Sizing, P),
-      "Scudo ERROR.*sizing.*42424242");
-  EXPECT_DEATH(
-      scudo::reportMisalignedPointer(scudo::AllocatorAction::Deallocating, P),
-      "Scudo ERROR.*deallocating.*42424242");
-  EXPECT_DEATH(scudo::reportDeallocTypeMismatch(
-                   scudo::AllocatorAction::Reallocating, P, 0, 1),
-               "Scudo ERROR.*reallocating.*42424242");
-  EXPECT_DEATH(scudo::reportDeleteSizeMismatch(P, 123, 456),
-               "Scudo ERROR.*42424242.*123.*456");
-}
-
-TEST(ScudoReportDeathTest, CSpecific) {
-  EXPECT_DEATH(scudo::reportAlignmentNotPowerOfTwo(123), "Scudo ERROR.*123");
-  EXPECT_DEATH(scudo::reportCallocOverflow(123, 456), "Scudo ERROR.*123.*456");
-  EXPECT_DEATH(scudo::reportInvalidPosixMemalignAlignment(789),
-               "Scudo ERROR.*789");
-  EXPECT_DEATH(scudo::reportPvallocOverflow(123), "Scudo ERROR.*123");
-  EXPECT_DEATH(scudo::reportInvalidAlignedAllocAlignment(123, 456),
-               "Scudo ERROR.*123.*456");
-}
diff --git a/Telegram/ThirdParty/scudo/tests/scudo_unit_test.h b/Telegram/ThirdParty/scudo/tests/scudo_unit_test.h
deleted file mode 100644
index 428341643..000000000
--- a/Telegram/ThirdParty/scudo/tests/scudo_unit_test.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//===-- scudo_unit_test.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_FUCHSIA
-#include <zxtest/zxtest.h>
-using Test = ::zxtest::Test;
-#else
-#include "gtest/gtest.h"
-using Test = ::testing::Test;
-#endif
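-
-// Either way, `Test` aliases the platform's base test fixture, so the same
-// test code compiles against both gtest and zxtest.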
-
-// If EXPECT_DEATH isn't defined, make it a no-op.
-#ifndef EXPECT_DEATH
-// If ASSERT_DEATH is defined, make EXPECT_DEATH a wrapper to it.
-#ifdef ASSERT_DEATH
-#define EXPECT_DEATH(X, Y) ASSERT_DEATH(([&] { X; }), "")
-#else
-#define EXPECT_DEATH(X, Y)                                                     \
-  do {                                                                         \
-  } while (0)
-#endif // ASSERT_DEATH
-#endif // EXPECT_DEATH
-
-// If EXPECT_STREQ isn't defined, define our own simple one.
-#ifndef EXPECT_STREQ
-#define EXPECT_STREQ(X, Y) EXPECT_EQ(strcmp(X, Y), 0)
-#endif
-
-#if SCUDO_FUCHSIA
-#define SKIP_ON_FUCHSIA(T) DISABLED_##T
-#else
-#define SKIP_ON_FUCHSIA(T) T
-#endif
-
-#if SCUDO_DEBUG
-#define SKIP_NO_DEBUG(T) T
-#else
-#define SKIP_NO_DEBUG(T) DISABLED_##T
-#endif
-
-#if SCUDO_FUCHSIA
-// The zxtest library provides a default main function that does the same thing
-// for Fuchsia builds.
-#define SCUDO_NO_TEST_MAIN
-#endif
-
-extern bool UseQuarantine;
diff --git a/Telegram/ThirdParty/scudo/tests/scudo_unit_test_main.cpp b/Telegram/ThirdParty/scudo/tests/scudo_unit_test_main.cpp
deleted file mode 100644
index 881e0265b..000000000
--- a/Telegram/ThirdParty/scudo/tests/scudo_unit_test_main.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-//===-- scudo_unit_test_main.cpp --------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "memtag.h"
-#include "tests/scudo_unit_test.h"
-
-// Match Android's default configuration, which disables Scudo's deallocation
-// type mismatch check, as it is triggered by some third party code.
-#if SCUDO_ANDROID
-#define DEALLOC_TYPE_MISMATCH "false"
-#else
-#define DEALLOC_TYPE_MISMATCH "true"
-#endif
-
-static void EnableMemoryTaggingIfSupported() {
-  if (!scudo::archSupportsMemoryTagging())
-    return;
-  static bool Done = []() {
-    if (!scudo::systemDetectsMemoryTagFaultsTestOnly())
-      scudo::enableSystemMemoryTaggingTestOnly();
-    return true;
-  }();
-  (void)Done;
-}
-
-// This allows us to turn on/off a Quarantine for specific tests. The Quarantine
-// parameters are on the low end, to avoid having to loop excessively in some
-// tests.
-bool UseQuarantine = true;
-extern "C" __attribute__((visibility("default"))) const char *
-__scudo_default_options() {
-  // The wrapper tests initialize the global allocator early, before main(). We
-  // need to have Memory Tagging enabled before that happens or the allocator
-  // will disable the feature entirely.
-  EnableMemoryTaggingIfSupported();
-  if (!UseQuarantine)
-    return "dealloc_type_mismatch=" DEALLOC_TYPE_MISMATCH;
-  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
-         "quarantine_max_chunk_size=512:"
-         "dealloc_type_mismatch=" DEALLOC_TYPE_MISMATCH;
-}
-
-#if !defined(SCUDO_NO_TEST_MAIN)
-int main(int argc, char **argv) {
-  EnableMemoryTaggingIfSupported();
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-#endif
diff --git a/Telegram/ThirdParty/scudo/tests/secondary_test.cpp b/Telegram/ThirdParty/scudo/tests/secondary_test.cpp
deleted file mode 100644
index 18d2e187f..000000000
--- a/Telegram/ThirdParty/scudo/tests/secondary_test.cpp
+++ /dev/null
@@ -1,253 +0,0 @@
-//===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "memtag.h"
-#include "tests/scudo_unit_test.h"
-
-#include "allocator_config.h"
-#include "secondary.h"
-
-#include <algorithm>
-#include <condition_variable>
-#include <memory>
-#include <mutex>
-#include <random>
-#include <stdio.h>
-#include <thread>
-#include <vector>
-
-template <typename Config> static scudo::Options getOptionsForConfig() {
-  if (!Config::MaySupportMemoryTagging || !scudo::archSupportsMemoryTagging() ||
-      !scudo::systemSupportsMemoryTagging())
-    return {};
-  scudo::AtomicOptions AO;
-  AO.set(scudo::OptionBit::UseMemoryTagging);
-  return AO.load();
-}
-
-template <typename Config> static void testSecondaryBasic(void) {
-  using SecondaryT = scudo::MapAllocator<Config>;
-  scudo::Options Options = getOptionsForConfig<Config>();
-
-  scudo::GlobalStats S;
-  S.init();
-  std::unique_ptr<SecondaryT> L(new SecondaryT);
-  L->init(&S);
-  const scudo::uptr Size = 1U << 16;
-  void *P = L->allocate(Options, Size);
-  EXPECT_NE(P, nullptr);
-  memset(P, 'A', Size);
-  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
-  L->deallocate(Options, P);
-
-  // If the Secondary can't cache that pointer, it will be unmapped.
-  if (!L->canCache(Size)) {
-    EXPECT_DEATH(
-        {
-          // Repeat a few times to avoid missing the crash in case the memory
-          // is mmapped again by unrelated code.
-          for (int i = 0; i < 10; ++i) {
-            P = L->allocate(Options, Size);
-            L->deallocate(Options, P);
-            memset(P, 'A', Size);
-          }
-        },
-        "");
-  }
-
-  const scudo::uptr Align = 1U << 16;
-  P = L->allocate(Options, Size + Align, Align);
-  EXPECT_NE(P, nullptr);
-  void *AlignedP = reinterpret_cast<void *>(
-      scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
-  memset(AlignedP, 'A', Size);
-  L->deallocate(Options, P);
-
-  std::vector<void *> V;
-  for (scudo::uptr I = 0; I < 32U; I++)
-    V.push_back(L->allocate(Options, Size));
-  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
-  while (!V.empty()) {
-    L->deallocate(Options, V.back());
-    V.pop_back();
-  }
-  scudo::ScopedString Str;
-  L->getStats(&Str);
-  Str.output();
-  L->unmapTestOnly();
-}
-
-struct NoCacheConfig {
-  static const bool MaySupportMemoryTagging = false;
-  struct Secondary {
-    template <typename Config>
-    using CacheT = scudo::MapAllocatorNoCache<Config>;
-  };
-};
-
-struct TestConfig {
-  static const bool MaySupportMemoryTagging = false;
-  struct Secondary {
-    struct Cache {
-      static const scudo::u32 EntriesArraySize = 128U;
-      static const scudo::u32 QuarantineSize = 0U;
-      static const scudo::u32 DefaultMaxEntriesCount = 64U;
-      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
-      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
-      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-    };
-
-    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
-  };
-};
-
-TEST(ScudoSecondaryTest, SecondaryBasic) {
-  testSecondaryBasic<NoCacheConfig>();
-  testSecondaryBasic<scudo::DefaultConfig>();
-  testSecondaryBasic<TestConfig>();
-}
-
-struct MapAllocatorTest : public Test {
-  using Config = scudo::DefaultConfig;
-  using LargeAllocator = scudo::MapAllocator<Config>;
-
-  void SetUp() override { Allocator->init(nullptr); }
-
-  void TearDown() override { Allocator->unmapTestOnly(); }
-
-  std::unique_ptr<LargeAllocator> Allocator =
-      std::make_unique<LargeAllocator>();
-  scudo::Options Options = getOptionsForConfig<Config>();
-};
-
-// This exercises a variety of combinations of size and alignment for the
-// MapAllocator. The size computation done here mimics the ones done by the
-// combined allocator.
-TEST_F(MapAllocatorTest, SecondaryCombinations) {
-  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
-  constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
-  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
-    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
-         AlignLog++) {
-      const scudo::uptr Align = 1U << AlignLog;
-      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
-        if ((1LL << SizeLog) + Delta <= 0)
-          continue;
-        const scudo::uptr UserSize = scudo::roundUp(
-            static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
-        const scudo::uptr Size =
-            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
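-        // As in the combined allocator: a header precedes the user data, and
-        // for alignments above MinAlign extra slack is reserved so the user
-        // pointer can be rounded up to `Align`.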
-        void *P = Allocator->allocate(Options, Size, Align);
-        EXPECT_NE(P, nullptr);
-        void *AlignedP = reinterpret_cast<void *>(
-            scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
-        memset(AlignedP, 0xff, UserSize);
-        Allocator->deallocate(Options, P);
-      }
-    }
-  }
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
-}
-
-TEST_F(MapAllocatorTest, SecondaryIterate) {
-  std::vector<void *> V;
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  for (scudo::uptr I = 0; I < 32U; I++)
-    V.push_back(Allocator->allocate(
-        Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
-  auto Lambda = [&V](scudo::uptr Block) {
-    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
-              V.end());
-  };
-  Allocator->disable();
-  Allocator->iterateOverBlocks(Lambda);
-  Allocator->enable();
-  while (!V.empty()) {
-    Allocator->deallocate(Options, V.back());
-    V.pop_back();
-  }
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
-}
-
-TEST_F(MapAllocatorTest, SecondaryOptions) {
-  // Attempt to set a maximum number of entries higher than the array size.
-  EXPECT_FALSE(
-      Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));
-  // A negative number will be cast to a scudo::u32, and fail.
-  EXPECT_FALSE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, -1));
-  if (Allocator->canCache(0U)) {
-    // Various valid combinations.
-    EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
-    EXPECT_TRUE(
-        Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
-    EXPECT_TRUE(Allocator->canCache(1UL << 18));
-    EXPECT_TRUE(
-        Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
-    EXPECT_FALSE(Allocator->canCache(1UL << 18));
-    EXPECT_TRUE(Allocator->canCache(1UL << 16));
-    EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
-    EXPECT_FALSE(Allocator->canCache(1UL << 16));
-    EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
-    EXPECT_TRUE(
-        Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
-    EXPECT_TRUE(Allocator->canCache(1UL << 16));
-  }
-}
-
-struct MapAllocatorWithReleaseTest : public MapAllocatorTest {
-  void SetUp() override { Allocator->init(nullptr, /*ReleaseToOsInterval=*/0); }
-
-  void performAllocations() {
-    std::vector<void *> V;
-    const scudo::uptr PageSize = scudo::getPageSizeCached();
-    {
-      std::unique_lock<std::mutex> Lock(Mutex);
-      while (!Ready)
-        Cv.wait(Lock);
-    }
-    for (scudo::uptr I = 0; I < 128U; I++) {
-      // Deallocate 75% of the blocks: (std::rand() & 3) != 0 holds with
-      // probability 3/4.
-      const bool Deallocate = (std::rand() & 3) != 0;
-      void *P = Allocator->allocate(
-          Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
-      if (Deallocate)
-        Allocator->deallocate(Options, P);
-      else
-        V.push_back(P);
-    }
-    while (!V.empty()) {
-      Allocator->deallocate(Options, V.back());
-      V.pop_back();
-    }
-  }
-
-  std::mutex Mutex;
-  std::condition_variable Cv;
-  bool Ready = false;
-};
-
-TEST_F(MapAllocatorWithReleaseTest, SecondaryThreadsRace) {
-  std::thread Threads[16];
-  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
-    Threads[I] =
-        std::thread(&MapAllocatorWithReleaseTest::performAllocations, this);
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
-}
diff --git a/Telegram/ThirdParty/scudo/tests/size_class_map_test.cpp b/Telegram/ThirdParty/scudo/tests/size_class_map_test.cpp
deleted file mode 100644
index 05b5835ff..000000000
--- a/Telegram/ThirdParty/scudo/tests/size_class_map_test.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-//===-- size_class_map_test.cpp ---------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "size_class_map.h"
-
-template <class SizeClassMap> void testSizeClassMap() {
-  typedef SizeClassMap SCMap;
-  scudo::printMap<SCMap>();
-  scudo::validateMap<SCMap>();
-}
-
-TEST(ScudoSizeClassMapTest, DefaultSizeClassMap) {
-  testSizeClassMap<scudo::DefaultSizeClassMap>();
-}
-
-TEST(ScudoSizeClassMapTest, AndroidSizeClassMap) {
-  testSizeClassMap<scudo::AndroidSizeClassMap>();
-}
-
-struct OneClassSizeClassConfig {
-  static const scudo::uptr NumBits = 1;
-  static const scudo::uptr MinSizeLog = 5;
-  static const scudo::uptr MidSizeLog = 5;
-  static const scudo::uptr MaxSizeLog = 5;
-  static const scudo::u16 MaxNumCachedHint = 0;
-  static const scudo::uptr MaxBytesCachedLog = 0;
-  static const scudo::uptr SizeDelta = 0;
-};
-
-TEST(ScudoSizeClassMapTest, OneClassSizeClassMap) {
-  testSizeClassMap<scudo::FixedSizeClassMap<OneClassSizeClassConfig>>();
-}
-
-#if SCUDO_CAN_USE_PRIMARY64
-struct LargeMaxSizeClassConfig {
-  static const scudo::uptr NumBits = 3;
-  static const scudo::uptr MinSizeLog = 4;
-  static const scudo::uptr MidSizeLog = 8;
-  static const scudo::uptr MaxSizeLog = 63;
-  static const scudo::u16 MaxNumCachedHint = 128;
-  static const scudo::uptr MaxBytesCachedLog = 16;
-  static const scudo::uptr SizeDelta = 0;
-};
-
-TEST(ScudoSizeClassMapTest, LargeMaxSizeClassMap) {
-  testSizeClassMap<scudo::FixedSizeClassMap<LargeMaxSizeClassConfig>>();
-}
-#endif
diff --git a/Telegram/ThirdParty/scudo/tests/stats_test.cpp b/Telegram/ThirdParty/scudo/tests/stats_test.cpp
deleted file mode 100644
index cdadfbad3..000000000
--- a/Telegram/ThirdParty/scudo/tests/stats_test.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-//===-- stats_test.cpp ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "stats.h"
-
-TEST(ScudoStatsTest, LocalStats) {
-  scudo::LocalStats LStats;
-  LStats.init();
-  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
-    EXPECT_EQ(LStats.get(static_cast<scudo::StatType>(I)), 0U);
-  LStats.add(scudo::StatAllocated, 4096U);
-  EXPECT_EQ(LStats.get(scudo::StatAllocated), 4096U);
-  LStats.sub(scudo::StatAllocated, 4096U);
-  EXPECT_EQ(LStats.get(scudo::StatAllocated), 0U);
-  LStats.set(scudo::StatAllocated, 4096U);
-  EXPECT_EQ(LStats.get(scudo::StatAllocated), 4096U);
-}
-
-TEST(ScudoStatsTest, GlobalStats) {
-  scudo::GlobalStats GStats;
-  GStats.init();
-  scudo::uptr Counters[scudo::StatCount] = {};
-  GStats.get(Counters);
-  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
-    EXPECT_EQ(Counters[I], 0U);
-  scudo::LocalStats LStats;
-  LStats.init();
-  GStats.link(&LStats);
-  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
-    LStats.add(static_cast<scudo::StatType>(I), 4096U);
-  GStats.get(Counters);
-  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
-    EXPECT_EQ(Counters[I], 4096U);
-  // Unlinking the local stats moves their numbers to the global stats.
-  GStats.unlink(&LStats);
-  GStats.get(Counters);
-  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
-    EXPECT_EQ(Counters[I], 4096U);
-}
diff --git a/Telegram/ThirdParty/scudo/tests/strings_test.cpp b/Telegram/ThirdParty/scudo/tests/strings_test.cpp
deleted file mode 100644
index 7a69ffd97..000000000
--- a/Telegram/ThirdParty/scudo/tests/strings_test.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-//===-- strings_test.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "string_utils.h"
-
-#include <limits.h>
-
-TEST(ScudoStringsTest, Constructor) {
-  scudo::ScopedString Str;
-  EXPECT_EQ(0ul, Str.length());
-  EXPECT_EQ('\0', *Str.data());
-}
-
-TEST(ScudoStringsTest, Basic) {
-  scudo::ScopedString Str;
-  Str.append("a%db%zdc%ue%zuf%xh%zxq%pe%sr", static_cast<int>(-1),
-             static_cast<scudo::uptr>(-2), static_cast<unsigned>(-4),
-             static_cast<scudo::uptr>(5), static_cast<unsigned>(10),
-             static_cast<scudo::uptr>(11), reinterpret_cast<void *>(0x123),
-             "_string_");
-  EXPECT_EQ(Str.length(), strlen(Str.data()));
-
-  std::string expectedString = "a-1b-2c4294967292e5fahbq0x";
-  expectedString += std::string(SCUDO_POINTER_FORMAT_LENGTH - 3, '0');
-  expectedString += "123e_string_r";
-  EXPECT_EQ(Str.length(), strlen(Str.data()));
-  EXPECT_STREQ(expectedString.c_str(), Str.data());
-}
-
-TEST(ScudoStringsTest, Clear) {
-  scudo::ScopedString Str;
-  Str.append("123");
-  Str.clear();
-  EXPECT_EQ(0ul, Str.length());
-  EXPECT_EQ('\0', *Str.data());
-}
-
-TEST(ScudoStringsTest, ClearLarge) {
-  constexpr char appendString[] = "123";
-  scudo::ScopedString Str;
-  Str.reserve(sizeof(appendString) * 10000);
-  for (int i = 0; i < 10000; ++i)
-    Str.append(appendString);
-  Str.clear();
-  EXPECT_EQ(0ul, Str.length());
-  EXPECT_EQ('\0', *Str.data());
-}
-
-TEST(ScudoStringsTest, Precision) {
-  scudo::ScopedString Str;
-  Str.append("%.*s", 3, "12345");
-  EXPECT_EQ(Str.length(), strlen(Str.data()));
-  EXPECT_STREQ("123", Str.data());
-  Str.clear();
-  Str.append("%.*s", 6, "12345");
-  EXPECT_EQ(Str.length(), strlen(Str.data()));
-  EXPECT_STREQ("12345", Str.data());
-  Str.clear();
-  Str.append("%-6s", "12345");
-  EXPECT_EQ(Str.length(), strlen(Str.data()));
-  EXPECT_STREQ("12345 ", Str.data());
-}
-
-static void fillString(scudo::ScopedString &Str, scudo::uptr Size) {
-  for (scudo::uptr I = 0; I < Size; I++)
-    Str.append("A");
-}
-
-TEST(ScudoStringTest, PotentialOverflows) {
-  // Use a ScopedString that spans a page, and attempt to write past the end
-  // of it with variations of append. The expectation is for nothing to crash.
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  scudo::ScopedString Str;
-  Str.reserve(2 * PageSize);
-  Str.clear();
-  fillString(Str, 2 * PageSize);
-  Str.clear();
-  fillString(Str, PageSize - 64);
-  Str.append("%-128s", "12345");
-  Str.clear();
-  fillString(Str, PageSize - 16);
-  Str.append("%024x", 12345);
-  Str.clear();
-  fillString(Str, PageSize - 16);
-  Str.append("EEEEEEEEEEEEEEEEEEEEEEEE");
-}
-
-template <typename T>
-static void testAgainstLibc(const char *Format, T Arg1, T Arg2) {
-  scudo::ScopedString Str;
-  Str.append(Format, Arg1, Arg2);
-  char Buffer[128];
-  snprintf(Buffer, sizeof(Buffer), Format, Arg1, Arg2);
-  EXPECT_EQ(Str.length(), strlen(Str.data()));
-  EXPECT_STREQ(Buffer, Str.data());
-}
-
-TEST(ScudoStringsTest, MinMax) {
-  testAgainstLibc<int>("%d-%d", INT_MIN, INT_MAX);
-  testAgainstLibc<unsigned>("%u-%u", 0, UINT_MAX);
-  testAgainstLibc<unsigned>("%x-%x", 0, UINT_MAX);
-  testAgainstLibc<long>("%zd-%zd", LONG_MIN, LONG_MAX);
-  testAgainstLibc<unsigned long>("%zu-%zu", 0, ULONG_MAX);
-  testAgainstLibc<unsigned long>("%zx-%zx", 0, ULONG_MAX);
-}
-
-TEST(ScudoStringsTest, Padding) {
-  testAgainstLibc<int>("%3d - %3d", 1, 0);
-  testAgainstLibc<int>("%3d - %3d", -1, 123);
-  testAgainstLibc<int>("%3d - %3d", -1, -123);
-  testAgainstLibc<int>("%3d - %3d", 12, 1234);
-  testAgainstLibc<int>("%3d - %3d", -12, -1234);
-  testAgainstLibc<int>("%03d - %03d", 1, 0);
-  testAgainstLibc<int>("%03d - %03d", -1, 123);
-  testAgainstLibc<int>("%03d - %03d", -1, -123);
-  testAgainstLibc<int>("%03d - %03d", 12, 1234);
-  testAgainstLibc<int>("%03d - %03d", -12, -1234);
-}
diff --git a/Telegram/ThirdParty/scudo/tests/timing_test.cpp b/Telegram/ThirdParty/scudo/tests/timing_test.cpp
deleted file mode 100644
index 09a6c3122..000000000
--- a/Telegram/ThirdParty/scudo/tests/timing_test.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-//===-- timing_test.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "timing.h"
-
-#include <string>
-
-class ScudoTimingTest : public Test {
-public:
-  void testFunc1() { scudo::ScopedTimer ST(Manager, __func__); }
-
-  void testFunc2() {
-    scudo::ScopedTimer ST(Manager, __func__);
-    testFunc1();
-  }
-
-  void testChainedCalls() {
-    scudo::ScopedTimer ST(Manager, __func__);
-    testFunc2();
-  }
-
-  void testIgnoredTimer() {
-    scudo::ScopedTimer ST(Manager, __func__);
-    ST.ignore();
-  }
-
-  void printAllTimersStats() { Manager.printAll(); }
-
-  scudo::TimingManager &getTimingManager() { return Manager; }
-
-private:
-  scudo::TimingManager Manager;
-};
-
-// The timer statistics are dumped through `scudo::Printf`, which is platform
-// dependent, so we don't have a reliable way to capture the output and verify
-// the details. For now we only verify the number of invocations, and only on
-// Linux.
-TEST_F(ScudoTimingTest, SimpleTimer) {
-#if SCUDO_LINUX
-  testing::internal::LogToStderr();
-  testing::internal::CaptureStderr();
-#endif
-
-  testIgnoredTimer();
-  testChainedCalls();
-  printAllTimersStats();
-
-#if SCUDO_LINUX
-  std::string output = testing::internal::GetCapturedStderr();
-  EXPECT_TRUE(output.find("testIgnoredTimer (1)") == std::string::npos);
-  EXPECT_TRUE(output.find("testChainedCalls (1)") != std::string::npos);
-  EXPECT_TRUE(output.find("testFunc2 (1)") != std::string::npos);
-  EXPECT_TRUE(output.find("testFunc1 (1)") != std::string::npos);
-#endif
-}
-
-TEST_F(ScudoTimingTest, NestedTimer) {
-#if SCUDO_LINUX
-  testing::internal::LogToStderr();
-  testing::internal::CaptureStderr();
-#endif
-
-  {
-    scudo::ScopedTimer Outer(getTimingManager(), "Outer");
-    {
-      scudo::ScopedTimer Inner1(getTimingManager(), Outer, "Inner1");
-      { scudo::ScopedTimer Inner2(getTimingManager(), Inner1, "Inner2"); }
-    }
-  }
-  printAllTimersStats();
-
-#if SCUDO_LINUX
-  std::string output = testing::internal::GetCapturedStderr();
-  EXPECT_TRUE(output.find("Outer (1)") != std::string::npos);
-  EXPECT_TRUE(output.find("Inner1 (1)") != std::string::npos);
-  EXPECT_TRUE(output.find("Inner2 (1)") != std::string::npos);
-#endif
-}
diff --git a/Telegram/ThirdParty/scudo/tests/tsd_test.cpp b/Telegram/ThirdParty/scudo/tests/tsd_test.cpp
deleted file mode 100644
index fad8fcf90..000000000
--- a/Telegram/ThirdParty/scudo/tests/tsd_test.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "tsd_exclusive.h"
-#include "tsd_shared.h"
-
-#include <stdlib.h>
-
-#include <condition_variable>
-#include <mutex>
-#include <set>
-#include <thread>
-
-// We mock out an allocator with a TSD registry, mostly using empty stubs. The
-// cache contains a single volatile uptr, so that we can test that several
-// concurrent threads do not access or modify the same cache at the same time.
-template <class Config> class MockAllocator {
-public:
-  using ThisT = MockAllocator<Config>;
-  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
-  using CacheT = struct MockCache {
-    volatile scudo::uptr Canary;
-  };
-  using QuarantineCacheT = struct MockQuarantine {};
-
-  void init() {
-    // This should only be called once by the registry.
-    EXPECT_FALSE(Initialized);
-    Initialized = true;
-  }
-
-  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
-  void initCache(CacheT *Cache) { *Cache = {}; }
-  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
-  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
-  void callPostInitCallback() {}
-
-  bool isInitialized() { return Initialized; }
-
-  void *operator new(size_t Size) {
-    void *P = nullptr;
-    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
-    return P;
-  }
-  void operator delete(void *P) { free(P); }
-
-private:
-  bool Initialized = false;
-  TSDRegistryT TSDRegistry;
-};
-
-struct OneCache {
-  template <class Allocator>
-  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
-};
-
-struct SharedCaches {
-  template <class Allocator>
-  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
-};
-
-struct ExclusiveCaches {
-  template <class Allocator>
-  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
-};
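-
-// For the shared registries above, the two integer template parameters are
-// the maximum and the default number of TSDs: OneCache pins both at 1, while
-// SharedCaches allows up to 16 TSDs with 8 by default.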
-
-TEST(ScudoTSDTest, TSDRegistryInit) {
-  using AllocatorT = MockAllocator<OneCache>;
-  auto Deleter = [](AllocatorT *A) {
-    A->unmapTestOnly();
-    delete A;
-  };
-  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
-                                                           Deleter);
-  EXPECT_FALSE(Allocator->isInitialized());
-
-  auto Registry = Allocator->getTSDRegistry();
-  Registry->initOnceMaybe(Allocator.get());
-  EXPECT_TRUE(Allocator->isInitialized());
-}
-
-template <class AllocatorT> static void testRegistry() {
-  auto Deleter = [](AllocatorT *A) {
-    A->unmapTestOnly();
-    delete A;
-  };
-  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
-                                                           Deleter);
-  EXPECT_FALSE(Allocator->isInitialized());
-
-  auto Registry = Allocator->getTSDRegistry();
-  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
-  EXPECT_TRUE(Allocator->isInitialized());
-
-  bool UnlockRequired;
-  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->getCache().Canary, 0U);
-  if (UnlockRequired)
-    TSD->unlock();
-
-  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
-  TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->getCache().Canary, 0U);
-  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
-  if (UnlockRequired)
-    TSD->unlock();
-}
-
-TEST(ScudoTSDTest, TSDRegistryBasic) {
-  testRegistry<MockAllocator<OneCache>>();
-  testRegistry<MockAllocator<SharedCaches>>();
-#if !SCUDO_FUCHSIA
-  testRegistry<MockAllocator<ExclusiveCaches>>();
-#endif
-}
-
-static std::mutex Mutex;
-static std::condition_variable Cv;
-static bool Ready;
-
-template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
-  auto Registry = Allocator->getTSDRegistry();
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    while (!Ready)
-      Cv.wait(Lock);
-  }
-  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
-  bool UnlockRequired;
-  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
-  // same for a shared TSD.
-  if (!UnlockRequired)
-    EXPECT_EQ(TSD->getCache().Canary, 0U);
-  // Transform the thread id into a uptr to use it as a canary.
-  const scudo::uptr Canary = static_cast<scudo::uptr>(
-      std::hash<std::thread::id>{}(std::this_thread::get_id()));
-  TSD->getCache().Canary = Canary;
-  // Loop a few times to make sure that a concurrent thread isn't modifying it.
-  for (scudo::uptr I = 0; I < 4096U; I++)
-    EXPECT_EQ(TSD->getCache().Canary, Canary);
-  if (UnlockRequired)
-    TSD->unlock();
-}
-
-template <class AllocatorT> static void testRegistryThreaded() {
-  Ready = false;
-  auto Deleter = [](AllocatorT *A) {
-    A->unmapTestOnly();
-    delete A;
-  };
-  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
-                                                           Deleter);
-  std::thread Threads[32];
-  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
-    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-}
-
-TEST(ScudoTSDTest, TSDRegistryThreaded) {
-  testRegistryThreaded<MockAllocator<OneCache>>();
-  testRegistryThreaded<MockAllocator<SharedCaches>>();
-#if !SCUDO_FUCHSIA
-  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
-#endif
-}
-
-static std::set<void *> Pointers;
-
-static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
-  std::set<void *> Set;
-  auto Registry = Allocator->getTSDRegistry();
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    while (!Ready)
-      Cv.wait(Lock);
-  }
-  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
-  bool UnlockRequired;
-  for (scudo::uptr I = 0; I < 4096U; I++) {
-    auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-    EXPECT_NE(TSD, nullptr);
-    Set.insert(reinterpret_cast<void *>(TSD));
-    if (UnlockRequired)
-      TSD->unlock();
-  }
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Pointers.insert(Set.begin(), Set.end());
-  }
-}
-
-TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
-  Ready = false;
-  Pointers.clear();
-  using AllocatorT = MockAllocator<SharedCaches>;
-  auto Deleter = [](AllocatorT *A) {
-    A->unmapTestOnly();
-    delete A;
-  };
-  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
-                                                           Deleter);
-  // We attempt to use as many TSDs as the shared cache offers by creating a
-  // decent number of threads that run concurrently and attempt to get and
-  // lock TSDs. We put them all in a set and count the number of entries after
-  // we are done.
-  std::thread Threads[32];
-  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
-    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-  // The initial number of TSDs we get will be the minimum of the default count
-  // and the number of CPUs.
-  EXPECT_LE(Pointers.size(), 8U);
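-  // (8U is the default TSD count configured by SharedCaches; machines with
-  // fewer CPUs may yield fewer distinct TSDs, hence EXPECT_LE rather than
-  // EXPECT_EQ.)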
-  Pointers.clear();
-  auto Registry = Allocator->getTSDRegistry();
-  // Increase the number of TSDs to 16.
-  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
-  Ready = false;
-  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
-    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-  // We should get 16 distinct TSDs back.
-  EXPECT_EQ(Pointers.size(), 16U);
-}
diff --git a/Telegram/ThirdParty/scudo/tests/vector_test.cpp b/Telegram/ThirdParty/scudo/tests/vector_test.cpp
deleted file mode 100644
index dc23c2a34..000000000
--- a/Telegram/ThirdParty/scudo/tests/vector_test.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- vector_test.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tests/scudo_unit_test.h"
-
-#include "vector.h"
-
-TEST(ScudoVectorTest, Basic) {
-  scudo::Vector<int> V;
-  EXPECT_EQ(V.size(), 0U);
-  V.push_back(42);
-  EXPECT_EQ(V.size(), 1U);
-  EXPECT_EQ(V[0], 42);
-  V.push_back(43);
-  EXPECT_EQ(V.size(), 2U);
-  EXPECT_EQ(V[0], 42);
-  EXPECT_EQ(V[1], 43);
-}
-
-TEST(ScudoVectorTest, Stride) {
-  scudo::Vector<scudo::uptr> V;
-  for (scudo::uptr I = 0; I < 1000; I++) {
-    V.push_back(I);
-    EXPECT_EQ(V.size(), I + 1U);
-    EXPECT_EQ(V[I], I);
-  }
-  for (scudo::uptr I = 0; I < 1000; I++)
-    EXPECT_EQ(V[I], I);
-}
-
-TEST(ScudoVectorTest, ResizeReduction) {
-  scudo::Vector<int> V;
-  V.push_back(0);
-  V.push_back(0);
-  EXPECT_EQ(V.size(), 2U);
-  V.resize(1);
-  EXPECT_EQ(V.size(), 1U);
-}
diff --git a/Telegram/ThirdParty/scudo/tests/wrappers_c_test.cpp b/Telegram/ThirdParty/scudo/tests/wrappers_c_test.cpp
deleted file mode 100644
index f5e17d721..000000000
--- a/Telegram/ThirdParty/scudo/tests/wrappers_c_test.cpp
+++ /dev/null
@@ -1,663 +0,0 @@
-//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "common.h"
-#include "memtag.h"
-#include "scudo/interface.h"
-#include "tests/scudo_unit_test.h"
-
-#include <errno.h>
-#include <limits.h>
-#include <malloc.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <vector>
-
-#ifndef __GLIBC_PREREQ
-#define __GLIBC_PREREQ(x, y) 0
-#endif
-
-#if SCUDO_FUCHSIA
-// Fuchsia only has valloc
-#define HAVE_VALLOC 1
-#elif SCUDO_ANDROID
-// Android only has pvalloc/valloc on 32 bit
-#if !defined(__LP64__)
-#define HAVE_PVALLOC 1
-#define HAVE_VALLOC 1
-#endif // !defined(__LP64__)
-#else
-// All others assumed to support both functions.
-#define HAVE_PVALLOC 1
-#define HAVE_VALLOC 1
-#endif
-
-extern "C" {
-void malloc_enable(void);
-void malloc_disable(void);
-int malloc_iterate(uintptr_t base, size_t size,
-                   void (*callback)(uintptr_t base, size_t size, void *arg),
-                   void *arg);
-void *valloc(size_t size);
-void *pvalloc(size_t size);
-
-#ifndef SCUDO_ENABLE_HOOKS_TESTS
-#define SCUDO_ENABLE_HOOKS_TESTS 0
-#endif
-
-#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
-#error "Hooks tests should have hooks enabled as well!"
-#endif
-
-struct AllocContext {
-  void *Ptr;
-  size_t Size;
-};
-struct DeallocContext {
-  void *Ptr;
-};
-struct ReallocContext {
-  void *AllocPtr;
-  void *DeallocPtr;
-  size_t Size;
-};
-static AllocContext AC;
-static DeallocContext DC;
-static ReallocContext RC;
-
-#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
-__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
-                                                                  size_t Size) {
-  AC.Ptr = Ptr;
-  AC.Size = Size;
-}
-__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
-  DC.Ptr = Ptr;
-}
-__attribute__((visibility("default"))) void
-__scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr, size_t Size) {
-  // Verify that __scudo_realloc_deallocate_hook is called first and sets the
-  // right pointer.
-  EXPECT_EQ(OldPtr, RC.DeallocPtr);
-  RC.AllocPtr = NewPtr;
-  RC.Size = Size;
-
-  // Note that this is only used for testing. In general, only one pair of
-  // hooks is invoked in `realloc`: if the __scudo_realloc_*_hook functions
-  // are not defined, only the general hooks are called. To make the test
-  // easier, we call the general ones here as well so that both cases (with
-  // and without __scudo_realloc_*_hook defined) are verified without
-  // separating them into different tests.
-  __scudo_allocate_hook(NewPtr, Size);
-}
-__attribute__((visibility("default"))) void
-__scudo_realloc_deallocate_hook(void *Ptr) {
-  RC.DeallocPtr = Ptr;
-
-  // See the comment in the __scudo_realloc_allocate_hook above.
-  __scudo_deallocate_hook(Ptr);
-}
-#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
-}
-
-class ScudoWrappersCTest : public Test {
-protected:
-  void SetUp() override {
-    if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
-      printf("Hooks are enabled but hooks tests are disabled.\n");
-  }
-
-  void invalidateHookPtrs() {
-    if (SCUDO_ENABLE_HOOKS_TESTS) {
-      void *InvalidPtr = reinterpret_cast<void *>(0xdeadbeef);
-      AC.Ptr = InvalidPtr;
-      DC.Ptr = InvalidPtr;
-      RC.AllocPtr = RC.DeallocPtr = InvalidPtr;
-    }
-  }
-  void verifyAllocHookPtr(UNUSED void *Ptr) {
-    if (SCUDO_ENABLE_HOOKS_TESTS)
-      EXPECT_EQ(Ptr, AC.Ptr);
-  }
-  void verifyAllocHookSize(UNUSED size_t Size) {
-    if (SCUDO_ENABLE_HOOKS_TESTS)
-      EXPECT_EQ(Size, AC.Size);
-  }
-  void verifyDeallocHookPtr(UNUSED void *Ptr) {
-    if (SCUDO_ENABLE_HOOKS_TESTS)
-      EXPECT_EQ(Ptr, DC.Ptr);
-  }
-  void verifyReallocHookPtrs(UNUSED void *OldPtr, void *NewPtr, size_t Size) {
-    if (SCUDO_ENABLE_HOOKS_TESTS) {
-      EXPECT_EQ(OldPtr, RC.DeallocPtr);
-      EXPECT_EQ(NewPtr, RC.AllocPtr);
-      EXPECT_EQ(Size, RC.Size);
-    }
-  }
-};
-using ScudoWrappersCDeathTest = ScudoWrappersCTest;
-
-// Note that every C allocation function in the test binary will be fulfilled
-// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
-// But this might also lead to unexpected side-effects, since the allocation and
-// deallocation operations in the TEST functions will coexist with others (see
-// the EXPECT_DEATH comment below).
-
-// We have to use a small quarantine to make sure that our double-free tests
-// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was
-// just freed (this obviously depends on the size) and the following free
-// succeeds.
-
-static const size_t Size = 100U;
-
-TEST_F(ScudoWrappersCDeathTest, Malloc) {
-  void *P = malloc(Size);
-  EXPECT_NE(P, nullptr);
-  EXPECT_LE(Size, malloc_usable_size(P));
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
-  verifyAllocHookPtr(P);
-  verifyAllocHookSize(Size);
-
-  // An update to this warning in Clang now makes it trigger on the line
-  // below, but that's ok because the check deliberately passes a bad pointer
-  // and is expected to fail.
-#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
-#endif
-  EXPECT_DEATH(
-      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
-#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
-#pragma GCC diagnostic pop
-#endif
-
-  free(P);
-  verifyDeallocHookPtr(P);
-  EXPECT_DEATH(free(P), "");
-
-  P = malloc(0U);
-  EXPECT_NE(P, nullptr);
-  free(P);
-
-  errno = 0;
-  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
-  EXPECT_EQ(errno, ENOMEM);
-}
-
-TEST_F(ScudoWrappersCTest, Calloc) {
-  void *P = calloc(1U, Size);
-  EXPECT_NE(P, nullptr);
-  EXPECT_LE(Size, malloc_usable_size(P));
-  verifyAllocHookPtr(P);
-  verifyAllocHookSize(Size);
-  for (size_t I = 0; I < Size; I++)
-    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
-  free(P);
-  verifyDeallocHookPtr(P);
-
-  P = calloc(1U, 0U);
-  EXPECT_NE(P, nullptr);
-  free(P);
-  P = calloc(0U, 1U);
-  EXPECT_NE(P, nullptr);
-  free(P);
-
-  errno = 0;
-  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
-  EXPECT_EQ(errno, ENOMEM);
-  errno = 0;
-  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
-  if (SCUDO_ANDROID)
-    EXPECT_EQ(errno, ENOMEM);
-  errno = 0;
-  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
-  EXPECT_EQ(errno, ENOMEM);
-}
-
-TEST_F(ScudoWrappersCTest, SmallAlign) {
-  // Allocate with sizes and alignments that are powers of 2 from 1 to
-  // 0x10000. Powers of 2 are used because memalign works with power-of-2
-  // alignments, and they let us test a wide range of sizes.
-  constexpr size_t MaxSize = 0x10000;
-  std::vector<void *> ptrs;
-  // Reserving space to prevent further allocation during the test
-  ptrs.reserve((scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) *
-               (scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) * 3);
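-  // (Illustrative arithmetic: MaxSize = 0x10000 = 2^16, so the index is 16
-  // and this reserves (16 + 1) * (16 + 1) * 3 = 867 slots, one per
-  // Size/Align/Count combination in the loops below.)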
-  for (size_t Size = 1; Size <= MaxSize; Size <<= 1) {
-    for (size_t Align = 1; Align <= MaxSize; Align <<= 1) {
-      for (size_t Count = 0; Count < 3; ++Count) {
-        void *P = memalign(Align, Size);
-        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
-        ptrs.push_back(P);
-      }
-    }
-  }
-  for (void *ptr : ptrs)
-    free(ptr);
-}
-
-TEST_F(ScudoWrappersCTest, Memalign) {
-  void *P;
-  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
-    const size_t Alignment = 1U << I;
-
-    P = memalign(Alignment, Size);
-    EXPECT_NE(P, nullptr);
-    EXPECT_LE(Size, malloc_usable_size(P));
-    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
-    verifyAllocHookPtr(P);
-    verifyAllocHookSize(Size);
-    free(P);
-    verifyDeallocHookPtr(P);
-
-    P = nullptr;
-    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
-    EXPECT_NE(P, nullptr);
-    EXPECT_LE(Size, malloc_usable_size(P));
-    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
-    verifyAllocHookPtr(P);
-    verifyAllocHookSize(Size);
-    free(P);
-    verifyDeallocHookPtr(P);
-  }
-
-  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
-  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
-  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);
-
-  // Android's memalign accepts non-power-of-2 alignments, and 0.
-  if (SCUDO_ANDROID) {
-    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
-      P = memalign(Alignment, 1024U);
-      EXPECT_NE(P, nullptr);
-      verifyAllocHookPtr(P);
-      verifyAllocHookSize(Size);
-      free(P);
-      verifyDeallocHookPtr(P);
-    }
-  }
-}
-
-TEST_F(ScudoWrappersCTest, AlignedAlloc) {
-  const size_t Alignment = 4096U;
-  void *P = aligned_alloc(Alignment, Alignment * 4U);
-  EXPECT_NE(P, nullptr);
-  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
-  verifyAllocHookPtr(P);
-  verifyAllocHookSize(Alignment * 4U);
-  free(P);
-  verifyDeallocHookPtr(P);
-
-  errno = 0;
-  P = aligned_alloc(Alignment, Size);
-  EXPECT_EQ(P, nullptr);
-  EXPECT_EQ(errno, EINVAL);
-}
-
-TEST_F(ScudoWrappersCDeathTest, Realloc) {
-  invalidateHookPtrs();
-  // realloc(nullptr, N) is malloc(N)
-  void *P = realloc(nullptr, Size);
-  EXPECT_NE(P, nullptr);
-  verifyAllocHookPtr(P);
-  verifyAllocHookSize(Size);
-  free(P);
-  verifyDeallocHookPtr(P);
-
-  invalidateHookPtrs();
-  P = malloc(Size);
-  EXPECT_NE(P, nullptr);
-  // realloc(P, 0U) is free(P) and returns nullptr
-  EXPECT_EQ(realloc(P, 0U), nullptr);
-  verifyDeallocHookPtr(P);
-
-  P = malloc(Size);
-  EXPECT_NE(P, nullptr);
-  EXPECT_LE(Size, malloc_usable_size(P));
-  memset(P, 0x42, Size);
-
-  invalidateHookPtrs();
-  void *OldP = P;
-  P = realloc(P, Size * 2U);
-  EXPECT_NE(P, nullptr);
-  EXPECT_LE(Size * 2U, malloc_usable_size(P));
-  for (size_t I = 0; I < Size; I++)
-    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
-  if (OldP == P) {
-    verifyDeallocHookPtr(OldP);
-    verifyAllocHookPtr(OldP);
-  } else {
-    verifyAllocHookPtr(P);
-    verifyAllocHookSize(Size * 2U);
-    verifyDeallocHookPtr(OldP);
-  }
-  verifyReallocHookPtrs(OldP, P, Size * 2U);
-
-  invalidateHookPtrs();
-  OldP = P;
-  P = realloc(P, Size / 2U);
-  EXPECT_NE(P, nullptr);
-  EXPECT_LE(Size / 2U, malloc_usable_size(P));
-  for (size_t I = 0; I < Size / 2U; I++)
-    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
-  if (OldP == P) {
-    verifyDeallocHookPtr(OldP);
-    verifyAllocHookPtr(OldP);
-  } else {
-    verifyAllocHookPtr(P);
-    verifyAllocHookSize(Size / 2U);
-  }
-  verifyReallocHookPtrs(OldP, P, Size / 2U);
-  free(P);
-
-  EXPECT_DEATH(P = realloc(P, Size), "");
-
-  errno = 0;
-  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
-  EXPECT_EQ(errno, ENOMEM);
-  P = malloc(Size);
-  EXPECT_NE(P, nullptr);
-  errno = 0;
-  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
-  EXPECT_EQ(errno, ENOMEM);
-  free(P);
-
-  // Android allows realloc of memalign pointers.
-  if (SCUDO_ANDROID) {
-    const size_t Alignment = 1024U;
-    P = memalign(Alignment, Size);
-    EXPECT_NE(P, nullptr);
-    EXPECT_LE(Size, malloc_usable_size(P));
-    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
-    memset(P, 0x42, Size);
-
-    P = realloc(P, Size * 2U);
-    EXPECT_NE(P, nullptr);
-    EXPECT_LE(Size * 2U, malloc_usable_size(P));
-    for (size_t I = 0; I < Size; I++)
-      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
-    free(P);
-  }
-}
-
-#if !SCUDO_FUCHSIA
-TEST_F(ScudoWrappersCTest, MallOpt) {
-  errno = 0;
-  EXPECT_EQ(mallopt(-1000, 1), 0);
-  // mallopt doesn't set errno.
-  EXPECT_EQ(errno, 0);
-
-  EXPECT_EQ(mallopt(M_PURGE, 0), 1);
-
-  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
-  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
-  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
-  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
-
-  if (SCUDO_ANDROID) {
-    EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
-    EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
-    EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
-  }
-}
-#endif
-
-TEST_F(ScudoWrappersCTest, OtherAlloc) {
-#if HAVE_PVALLOC
-  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
-
-  void *P = pvalloc(Size);
-  EXPECT_NE(P, nullptr);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
-  EXPECT_LE(PageSize, malloc_usable_size(P));
-  verifyAllocHookPtr(P);
-  // Size will be rounded up to PageSize.
-  verifyAllocHookSize(PageSize);
-  free(P);
-  verifyDeallocHookPtr(P);
-
-  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);
-
-  P = pvalloc(Size);
-  EXPECT_NE(P, nullptr);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
-  free(P);
-#endif
-
-#if HAVE_VALLOC
-  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
-#endif
-}
-
-template<typename FieldType>
-void MallInfoTest() {
-  // mallinfo is deprecated.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-  const FieldType BypassQuarantineSize = 1024U;
-  struct mallinfo MI = mallinfo();
-  FieldType Allocated = MI.uordblks;
-  void *P = malloc(BypassQuarantineSize);
-  EXPECT_NE(P, nullptr);
-  MI = mallinfo();
-  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
-  EXPECT_GT(MI.hblkhd, static_cast<FieldType>(0));
-  FieldType Free = MI.fordblks;
-  free(P);
-  MI = mallinfo();
-  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
-#pragma clang diagnostic pop
-}
-
-#if !SCUDO_FUCHSIA
-TEST_F(ScudoWrappersCTest, MallInfo) {
-#if SCUDO_ANDROID
-  // Android accidentally set the fields to size_t instead of int.
-  MallInfoTest<size_t>();
-#else
-  MallInfoTest<int>();
-#endif
-}
-#endif
-
-#if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
-TEST_F(ScudoWrappersCTest, MallInfo2) {
-  const size_t BypassQuarantineSize = 1024U;
-  struct mallinfo2 MI = mallinfo2();
-  size_t Allocated = MI.uordblks;
-  void *P = malloc(BypassQuarantineSize);
-  EXPECT_NE(P, nullptr);
-  MI = mallinfo2();
-  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
-  EXPECT_GT(MI.hblkhd, 0U);
-  size_t Free = MI.fordblks;
-  free(P);
-  MI = mallinfo2();
-  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
-}
-#endif
-
-static uintptr_t BoundaryP;
-static size_t Count;
-
-static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
-  if (scudo::archSupportsMemoryTagging()) {
-    Base = scudo::untagPointer(Base);
-    BoundaryP = scudo::untagPointer(BoundaryP);
-  }
-  if (Base == BoundaryP)
-    Count++;
-}
-
-// Verify that a block located on an iteration boundary is not mis-accounted.
-// To achieve this, we allocate a chunk for which the backing block will be
-// aligned on a page boundary, then run malloc_iterate on each of the two
-// pages the block borders. It must be seen exactly once by the callback.
-TEST_F(ScudoWrappersCTest, MallocIterateBoundary) {
-  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
-#if SCUDO_ANDROID
-  // Android uses a 16-byte alignment for both 32-bit and 64-bit.
-  const size_t BlockDelta = 16U;
-#else
-  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
-#endif
-  const size_t SpecialSize = PageSize - BlockDelta;
-
-  // We aren't guaranteed that any size class is exactly a page wide, so we
-  // need to keep making allocations until we get one whose backing block
-  // starts exactly on a page boundary. The BlockDelta value is the number of
-  // bytes to subtract from a returned pointer to get to the actual start of
-  // the block in the size class. In practice, this means BlockDelta should be
-  // set to the minimum alignment in bytes for the allocation.
-  //
-  // With a 16-byte block alignment and a 4096-byte page size, each allocation
-  // has a probability of (1 - (16/4096)) of failing to meet the alignment
-  // requirement, and the probability of failing 65536 times in a row is
-  // (1 - (16/4096))^65536 < 10^-111. So if we still haven't succeeded after
-  // 65536 tries, give up.
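-  //
-  // A quick check of that bound (illustrative arithmetic): with x = 16/4096 =
-  // 1/256, (1 - x)^65536 = exp(65536 * ln(1 - 1/256)) ~= exp(-65536/256) =
-  // exp(-256) ~= 10^-111, so the loop below is all but guaranteed to exit
-  // early.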
-  uintptr_t Block;
-  void *P = nullptr;
-  for (unsigned I = 0; I != 65536; ++I) {
-    void *PrevP = P;
-    P = malloc(SpecialSize);
-    EXPECT_NE(P, nullptr);
-    *reinterpret_cast<void **>(P) = PrevP;
-    BoundaryP = reinterpret_cast<uintptr_t>(P);
-    Block = BoundaryP - BlockDelta;
-    if ((Block & (PageSize - 1)) == 0U)
-      break;
-  }
-  EXPECT_EQ((Block & (PageSize - 1)), 0U);
-
-  Count = 0U;
-  malloc_disable();
-  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
-  malloc_iterate(Block, PageSize, callback, nullptr);
-  malloc_enable();
-  EXPECT_EQ(Count, 1U);
-
-  while (P) {
-    void *NextP = *reinterpret_cast<void **>(P);
-    free(P);
-    P = NextP;
-  }
-}
-
-// Fuchsia doesn't have alarm, fork or malloc_info.
-#if !SCUDO_FUCHSIA
-TEST_F(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
-  // We expect heap operations within a disable/enable scope to deadlock.
-  EXPECT_DEATH(
-      {
-        void *P = malloc(Size);
-        EXPECT_NE(P, nullptr);
-        free(P);
-        malloc_disable();
-        alarm(1);
-        P = malloc(Size);
-        malloc_enable();
-      },
-      "");
-}
-
-TEST_F(ScudoWrappersCTest, MallocInfo) {
-  // Use volatile so that the allocations don't get optimized away.
-  void *volatile P1 = malloc(1234);
-  void *volatile P2 = malloc(4321);
-
-  char Buffer[16384];
-  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
-  EXPECT_NE(F, nullptr);
-  errno = 0;
-  EXPECT_EQ(malloc_info(0, F), 0);
-  EXPECT_EQ(errno, 0);
-  fclose(F);
-  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
-  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
-  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));
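-
-  // The written payload looks roughly like the following (illustrative; the
-  // <alloc .../> element format matches what readAllocs() in
-  // compute_size_class_config parses):
-  //   <malloc version="scudo-1">
-  //   <alloc size="1234" count="..."/>
-  //   <alloc size="4321" count="..."/>
-  //   ...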
-
-  free(P1);
-  free(P2);
-}
-
-TEST_F(ScudoWrappersCDeathTest, Fork) {
-  void *P;
-  pid_t Pid = fork();
-  EXPECT_GE(Pid, 0) << strerror(errno);
-  if (Pid == 0) {
-    P = malloc(Size);
-    EXPECT_NE(P, nullptr);
-    memset(P, 0x42, Size);
-    free(P);
-    _exit(0);
-  }
-  waitpid(Pid, nullptr, 0);
-  P = malloc(Size);
-  EXPECT_NE(P, nullptr);
-  memset(P, 0x42, Size);
-  free(P);
-
-  // fork should stall if the allocator has been disabled.
-  EXPECT_DEATH(
-      {
-        malloc_disable();
-        alarm(1);
-        Pid = fork();
-        EXPECT_GE(Pid, 0);
-      },
-      "");
-}
-
-static pthread_mutex_t Mutex;
-static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
-static bool Ready;
-
-static void *enableMalloc(UNUSED void *Unused) {
-  // Initialize the allocator for this thread.
-  void *P = malloc(Size);
-  EXPECT_NE(P, nullptr);
-  memset(P, 0x42, Size);
-  free(P);
-
-  // Signal the main thread we are ready.
-  pthread_mutex_lock(&Mutex);
-  Ready = true;
-  pthread_cond_signal(&Conditional);
-  pthread_mutex_unlock(&Mutex);
-
-  // Wait for the malloc_disable & fork, then enable the allocator again.
-  sleep(1);
-  malloc_enable();
-
-  return nullptr;
-}
-
-TEST_F(ScudoWrappersCTest, DisableForkEnable) {
-  pthread_t ThreadId;
-  Ready = false;
-  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);
-
-  // Wait for the thread to be warmed up.
-  pthread_mutex_lock(&Mutex);
-  while (!Ready)
-    pthread_cond_wait(&Conditional, &Mutex);
-  pthread_mutex_unlock(&Mutex);
-
-  // Disable the allocator and fork. fork should succeed after malloc_enable.
-  malloc_disable();
-  pid_t Pid = fork();
-  EXPECT_GE(Pid, 0);
-  if (Pid == 0) {
-    void *P = malloc(Size);
-    EXPECT_NE(P, nullptr);
-    memset(P, 0x42, Size);
-    free(P);
-    _exit(0);
-  }
-  waitpid(Pid, nullptr, 0);
-  EXPECT_EQ(pthread_join(ThreadId, 0), 0);
-}
-
-#endif // SCUDO_FUCHSIA
diff --git a/Telegram/ThirdParty/scudo/tests/wrappers_cpp_test.cpp b/Telegram/ThirdParty/scudo/tests/wrappers_cpp_test.cpp
deleted file mode 100644
index c802ed22f..000000000
--- a/Telegram/ThirdParty/scudo/tests/wrappers_cpp_test.cpp
+++ /dev/null
@@ -1,273 +0,0 @@
-//===-- wrappers_cpp_test.cpp -----------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "memtag.h"
-#include "tests/scudo_unit_test.h"
-
-#include <atomic>
-#include <condition_variable>
-#include <fstream>
-#include <memory>
-#include <mutex>
-#include <thread>
-#include <vector>
-
-// Android does not support checking for new/delete mismatches.
-#if SCUDO_ANDROID
-#define SKIP_MISMATCH_TESTS 1
-#else
-#define SKIP_MISMATCH_TESTS 0
-#endif
-
-void operator delete(void *, size_t) noexcept;
-void operator delete[](void *, size_t) noexcept;
-
-extern "C" {
-#ifndef SCUDO_ENABLE_HOOKS_TESTS
-#define SCUDO_ENABLE_HOOKS_TESTS 0
-#endif
-
-#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
-#error "Hooks tests should have hooks enabled as well!"
-#endif
-
-struct AllocContext {
-  void *Ptr;
-  size_t Size;
-};
-struct DeallocContext {
-  void *Ptr;
-};
-static AllocContext AC;
-static DeallocContext DC;
-
-#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
-__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
-                                                                  size_t Size) {
-  AC.Ptr = Ptr;
-  AC.Size = Size;
-}
-__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
-  DC.Ptr = Ptr;
-}
-#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
-}
-
-class ScudoWrappersCppTest : public Test {
-protected:
-  void SetUp() override {
-    if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
-      printf("Hooks are enabled but hooks tests are disabled.\n");
-  }
-
-  void verifyAllocHookPtr(UNUSED void *Ptr) {
-    if (SCUDO_ENABLE_HOOKS_TESTS)
-      EXPECT_EQ(Ptr, AC.Ptr);
-  }
-  void verifyAllocHookSize(UNUSED size_t Size) {
-    if (SCUDO_ENABLE_HOOKS_TESTS)
-      EXPECT_EQ(Size, AC.Size);
-  }
-  void verifyDeallocHookPtr(UNUSED void *Ptr) {
-    if (SCUDO_ENABLE_HOOKS_TESTS)
-      EXPECT_EQ(Ptr, DC.Ptr);
-  }
-
-  template <typename T> void testCxxNew() {
-    T *P = new T;
-    EXPECT_NE(P, nullptr);
-    verifyAllocHookPtr(P);
-    verifyAllocHookSize(sizeof(T));
-    memset(P, 0x42, sizeof(T));
-    EXPECT_DEATH(delete[] P, "");
-    delete P;
-    verifyDeallocHookPtr(P);
-    EXPECT_DEATH(delete P, "");
-
-    P = new T;
-    EXPECT_NE(P, nullptr);
-    memset(P, 0x42, sizeof(T));
-    operator delete(P, sizeof(T));
-    verifyDeallocHookPtr(P);
-
-    P = new (std::nothrow) T;
-    verifyAllocHookPtr(P);
-    verifyAllocHookSize(sizeof(T));
-    EXPECT_NE(P, nullptr);
-    memset(P, 0x42, sizeof(T));
-    delete P;
-    verifyDeallocHookPtr(P);
-
-    const size_t N = 16U;
-    T *A = new T[N];
-    EXPECT_NE(A, nullptr);
-    verifyAllocHookPtr(A);
-    verifyAllocHookSize(sizeof(T) * N);
-    memset(A, 0x42, sizeof(T) * N);
-    EXPECT_DEATH(delete A, "");
-    delete[] A;
-    verifyDeallocHookPtr(A);
-    EXPECT_DEATH(delete[] A, "");
-
-    A = new T[N];
-    EXPECT_NE(A, nullptr);
-    memset(A, 0x42, sizeof(T) * N);
-    operator delete[](A, sizeof(T) * N);
-    verifyDeallocHookPtr(A);
-
-    A = new (std::nothrow) T[N];
-    verifyAllocHookPtr(A);
-    verifyAllocHookSize(sizeof(T) * N);
-    EXPECT_NE(A, nullptr);
-    memset(A, 0x42, sizeof(T) * N);
-    delete[] A;
-    verifyDeallocHookPtr(A);
-  }
-};
-using ScudoWrappersCppDeathTest = ScudoWrappersCppTest;
-
-class Pixel {
-public:
-  enum class Color { Red, Green, Blue };
-  int X = 0;
-  int Y = 0;
-  Color C = Color::Red;
-};
-
-// Note that every Cxx allocation function in the test binary will be fulfilled
-// by Scudo. See the comment in the C counterpart of this file.
-
-TEST_F(ScudoWrappersCppDeathTest, New) {
-  if (getenv("SKIP_TYPE_MISMATCH") || SKIP_MISMATCH_TESTS) {
-    printf("Skipped type mismatch tests.\n");
-    return;
-  }
-  testCxxNew<bool>();
-  testCxxNew<uint8_t>();
-  testCxxNew<uint16_t>();
-  testCxxNew<uint32_t>();
-  testCxxNew<uint64_t>();
-  testCxxNew<float>();
-  testCxxNew<double>();
-  testCxxNew<long double>();
-  testCxxNew<Pixel>();
-}
-
-static std::mutex Mutex;
-static std::condition_variable Cv;
-static bool Ready;
-
-static void stressNew() {
-  std::vector<uintptr_t *> V;
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    while (!Ready)
-      Cv.wait(Lock);
-  }
-  for (size_t I = 0; I < 256U; I++) {
-    const size_t N = static_cast<size_t>(std::rand()) % 128U;
-    uintptr_t *P = new uintptr_t[N];
-    if (P) {
-      memset(P, 0x42, sizeof(uintptr_t) * N);
-      V.push_back(P);
-    }
-  }
-  while (!V.empty()) {
-    delete[] V.back();
-    V.pop_back();
-  }
-}
-
-TEST_F(ScudoWrappersCppTest, ThreadedNew) {
-  // TODO: Investigate why libc sometimes crashes with a tag mismatch in
-  // __pthread_clockjoin_ex.
-  std::unique_ptr<scudo::ScopedDisableMemoryTagChecks> NoTags;
-  if (!SCUDO_ANDROID && scudo::archSupportsMemoryTagging() &&
-      scudo::systemSupportsMemoryTagging())
-    NoTags = std::make_unique<scudo::ScopedDisableMemoryTagChecks>();
-
-  Ready = false;
-  std::thread Threads[32];
-  for (size_t I = 0U; I < sizeof(Threads) / sizeof(Threads[0]); I++)
-    Threads[I] = std::thread(stressNew);
-  {
-    std::unique_lock<std::mutex> Lock(Mutex);
-    Ready = true;
-    Cv.notify_all();
-  }
-  for (auto &T : Threads)
-    T.join();
-}
-
-#if !SCUDO_FUCHSIA
-TEST_F(ScudoWrappersCppTest, AllocAfterFork) {
-  // This test can fail flakily when run as part of a large number of other
-  // tests if the maximum number of mappings allowed is low. Reducing the
-  // number of loop iterations helped only moderately, so we now skip this
-  // test under those circumstances.
-  if (SCUDO_LINUX) {
-    long MaxMapCount = 0;
-    // If the file can't be accessed, we proceed with the test.
-    std::ifstream Stream("/proc/sys/vm/max_map_count");
-    if (Stream.good()) {
-      Stream >> MaxMapCount;
-      if (MaxMapCount < 200000)
-        return;
-    }
-  }
-
-  std::atomic_bool Stop;
-
-  // Create threads that simply allocate and free different sizes.
-  std::vector<std::thread *> Threads;
-  for (size_t N = 0; N < 5; N++) {
-    std::thread *T = new std::thread([&Stop] {
-      while (!Stop) {
-        for (size_t SizeLog = 3; SizeLog <= 20; SizeLog++) {
-          char *P = new char[1UL << SizeLog];
-          EXPECT_NE(P, nullptr);
-          // Make sure this value is not optimized away.
-          asm volatile("" : : "r,m"(P) : "memory");
-          delete[] P;
-        }
-      }
-    });
-    Threads.push_back(T);
-  }
-
-  // Fork repeatedly; each child allocates and exits while the threads above
-  // keep allocating.
-  for (size_t N = 0; N < 50; N++) {
-    pid_t Pid;
-    if ((Pid = fork()) == 0) {
-      for (size_t SizeLog = 3; SizeLog <= 20; SizeLog++) {
-        char *P = new char[1UL << SizeLog];
-        EXPECT_NE(P, nullptr);
-        // Make sure this value is not optimized away.
-        asm volatile("" : : "r,m"(P) : "memory");
-        // Make sure we can touch all of the allocation.
-        memset(P, 0x32, 1U << SizeLog);
-        // EXPECT_LE(1U << SizeLog, malloc_usable_size(ptr));
-        delete[] P;
-      }
-      _exit(10);
-    }
-    EXPECT_NE(-1, Pid);
-    int Status;
-    EXPECT_EQ(Pid, waitpid(Pid, &Status, 0));
-    EXPECT_FALSE(WIFSIGNALED(Status));
-    EXPECT_EQ(10, WEXITSTATUS(Status));
-  }
-
-  printf("Waiting for threads to complete\n");
-  Stop = true;
-  for (auto Thread : Threads)
-    Thread->join();
-  Threads.clear();
-}
-#endif
diff --git a/Telegram/ThirdParty/scudo/thread_annotations.h b/Telegram/ThirdParty/scudo/thread_annotations.h
deleted file mode 100644
index 68a1087c2..000000000
--- a/Telegram/ThirdParty/scudo/thread_annotations.h
+++ /dev/null
@@ -1,70 +0,0 @@
-//===-- thread_annotations.h ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_THREAD_ANNOTATIONS_
-#define SCUDO_THREAD_ANNOTATIONS_
-
-// Enable thread safety attributes only with clang.
-// The attributes can be safely ignored when compiling with other compilers.
-#if defined(__clang__)
-#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
-#else
-#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
-#endif
-
-#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
-
-#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
-
-#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
-
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
-
-#define ACQUIRED_BEFORE(...)                                                   \
-  THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
-
-#define ACQUIRED_AFTER(...)                                                    \
-  THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
-
-#define REQUIRES(...)                                                          \
-  THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
-
-#define REQUIRES_SHARED(...)                                                   \
-  THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
-
-#define ACQUIRE(...)                                                           \
-  THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
-
-#define ACQUIRE_SHARED(...)                                                    \
-  THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
-
-#define RELEASE(...)                                                           \
-  THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
-
-#define RELEASE_SHARED(...)                                                    \
-  THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
-
-#define TRY_ACQUIRE(...)                                                       \
-  THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
-
-#define TRY_ACQUIRE_SHARED(...)                                                \
-  THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
-
-#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
-
-#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
-
-#define ASSERT_SHARED_CAPABILITY(x)                                            \
-  THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
-
-#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
-
-#define NO_THREAD_SAFETY_ANALYSIS                                              \
-  THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
-
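-// A minimal usage sketch (illustrative only; HybridMutex and ScopedLock are
-// the capability types these macros annotate elsewhere in Scudo):
-//
-//   class Stats {
-//     HybridMutex M;
-//     uptr Count GUARDED_BY(M) = 0;
-//     void inc() EXCLUDES(M) {
-//       ScopedLock L(M);
-//       Count++;
-//     }
-//   };
-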
-#endif // SCUDO_THREAD_ANNOTATIONS_
diff --git a/Telegram/ThirdParty/scudo/timing.cpp b/Telegram/ThirdParty/scudo/timing.cpp
deleted file mode 100644
index 59ae21d10..000000000
--- a/Telegram/ThirdParty/scudo/timing.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//===-- timing.cpp ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "timing.h"
-
-namespace scudo {
-
-Timer::~Timer() {
-  if (Manager)
-    Manager->report(*this);
-}
-
-ScopedTimer::ScopedTimer(TimingManager &Manager, const char *Name)
-    : Timer(Manager.getOrCreateTimer(Name)) {
-  start();
-}
-
-ScopedTimer::ScopedTimer(TimingManager &Manager, const Timer &Nest,
-                         const char *Name)
-    : Timer(Manager.nest(Nest, Name)) {
-  start();
-}
-
-} // namespace scudo
diff --git a/Telegram/ThirdParty/scudo/timing.h b/Telegram/ThirdParty/scudo/timing.h
deleted file mode 100644
index 84caa79e5..000000000
--- a/Telegram/ThirdParty/scudo/timing.h
+++ /dev/null
@@ -1,221 +0,0 @@
-//===-- timing.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TIMING_H_
-#define SCUDO_TIMING_H_
-
-#include "common.h"
-#include "mutex.h"
-#include "string_utils.h"
-#include "thread_annotations.h"
-
-#include <inttypes.h>
-#include <string.h>
-
-namespace scudo {
-
-class TimingManager;
-
-// A simple timer for evaluating the execution time of code snippets. It can
-// be used along with TimingManager or standalone.
-class Timer {
-public:
-  // A Timer that is not bound to a TimingManager is expected to have its
-  // results logged manually by the user. Otherwise, TimingManager takes care
-  // of the logging for you.
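-  //
-  // A minimal standalone-use sketch (illustrative only; doSomeWork() and the
-  // manual reporting are placeholders):
-  //   Timer T;
-  //   T.start();
-  //   doSomeWork();
-  //   T.stop();
-  //   // ... report T.getAccumulatedTime() manually ...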
-  Timer() = default;
-  Timer(Timer &&Other)
-      : StartTime(0), AccTime(Other.AccTime), Manager(Other.Manager),
-        HandleId(Other.HandleId) {
-    Other.Manager = nullptr;
-  }
-
-  Timer(const Timer &) = delete;
-
-  ~Timer();
-
-  void start() {
-    CHECK_EQ(StartTime, 0U);
-    StartTime = getMonotonicTime();
-  }
-  void stop() {
-    AccTime += getMonotonicTime() - StartTime;
-    StartTime = 0;
-  }
-  u64 getAccumulatedTime() const { return AccTime; }
-
-  // Unset the bound TimingManager so that we don't report the data back. This
-  // is useful if we only want to track a subset of certain scope events.
-  void ignore() {
-    StartTime = 0;
-    AccTime = 0;
-    Manager = nullptr;
-  }
-
-protected:
-  friend class TimingManager;
-  Timer(TimingManager &Manager, u32 HandleId)
-      : Manager(&Manager), HandleId(HandleId) {}
-
-  u64 StartTime = 0;
-  u64 AccTime = 0;
-  TimingManager *Manager = nullptr;
-  u32 HandleId;
-};
-
-// An RAII-style wrapper for easily measuring the execution time of a scope.
-// Note that, in order not to take additional space for data like `Name`, it
-// only works together with TimingManager.
-class ScopedTimer : public Timer {
-public:
-  ScopedTimer(TimingManager &Manager, const char *Name);
-  ScopedTimer(TimingManager &Manager, const Timer &Nest, const char *Name);
-  ~ScopedTimer() { stop(); }
-};
-
-// In Scudo, the execution time of a single run of a code snippet may not be
-// useful; we are more interested in the average time over several runs.
-// TimingManager lets the registered timers report their data and periodically
-// reports the average execution time for each timer.
-class TimingManager {
-public:
-  TimingManager(u32 PrintingInterval = DefaultPrintingInterval)
-      : PrintingInterval(PrintingInterval) {}
-  ~TimingManager() {
-    if (NumAllocatedTimers != 0)
-      printAll();
-  }
-
-  Timer getOrCreateTimer(const char *Name) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-
-    CHECK_LT(strlen(Name), MaxLenOfTimerName);
-    for (u32 I = 0; I < NumAllocatedTimers; ++I) {
-      if (strncmp(Name, Timers[I].Name, MaxLenOfTimerName) == 0)
-        return Timer(*this, I);
-    }
-
-    CHECK_LT(NumAllocatedTimers, MaxNumberOfTimers);
-    strncpy(Timers[NumAllocatedTimers].Name, Name, MaxLenOfTimerName);
-    TimerRecords[NumAllocatedTimers].AccumulatedTime = 0;
-    TimerRecords[NumAllocatedTimers].Occurrence = 0;
-    return Timer(*this, NumAllocatedTimers++);
-  }
-
-  // Add a sub-Timer associated with another Timer. This is used when we want
-  // to break down the execution time within the scope of a Timer.
-  // For example,
-  //   void Foo() {
-  //     // T1 records the time spent in both the first and second tasks.
-  //     ScopedTimer T1(getTimingManager(), "Task1");
-  //     {
-  //       // T2 records the time spent in the first task only.
-  //       ScopedTimer T2(getTimingManager(), T1, "Task2");
-  //       // Do the first task.
-  //     }
-  //     // Do the second task.
-  //   }
-  //
-  // The report will show proper indentation to reflect the nesting, e.g.,
-  //   -- Average Operation Time -- -- Name (# of Calls) --
-  //             10.0(ns)            Task1 (1)
-  //              5.0(ns)              Task2 (1)
-  Timer nest(const Timer &T, const char *Name) EXCLUDES(Mutex) {
-    CHECK_EQ(T.Manager, this);
-    Timer Nesting = getOrCreateTimer(Name);
-
-    ScopedLock L(Mutex);
-    CHECK_NE(Nesting.HandleId, T.HandleId);
-    Timers[Nesting.HandleId].Nesting = T.HandleId;
-    return Nesting;
-  }
-
-  void report(const Timer &T) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-
-    const u32 HandleId = T.HandleId;
-    CHECK_LT(HandleId, MaxNumberOfTimers);
-    TimerRecords[HandleId].AccumulatedTime += T.getAccumulatedTime();
-    ++TimerRecords[HandleId].Occurrence;
-    ++NumEventsReported;
-    if (NumEventsReported % PrintingInterval == 0)
-      printAllImpl();
-  }
-
-  void printAll() EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    printAllImpl();
-  }
-
-private:
-  void printAllImpl() REQUIRES(Mutex) {
-    static char NameHeader[] = "-- Name (# of Calls) --";
-    static char AvgHeader[] = "-- Average Operation Time --";
-    ScopedString Str;
-    Str.append("%-15s %-15s\n", AvgHeader, NameHeader);
-
-    for (u32 I = 0; I < NumAllocatedTimers; ++I) {
-      if (Timers[I].Nesting != MaxNumberOfTimers)
-        continue;
-      printImpl(Str, I);
-    }
-
-    Str.output();
-  }
-
-  void printImpl(ScopedString &Str, const u32 HandleId,
-                 const u32 ExtraIndent = 0) REQUIRES(Mutex) {
-    const u64 AccumulatedTime = TimerRecords[HandleId].AccumulatedTime;
-    const u64 Occurrence = TimerRecords[HandleId].Occurrence;
-    const u64 Integral = Occurrence == 0 ? 0 : AccumulatedTime / Occurrence;
-    // Keeping a single fractional digit is enough, and it makes the layout
-    // easier to maintain.
-    const u64 Fraction =
-        Occurrence == 0 ? 0
-                        : ((AccumulatedTime % Occurrence) * 10) / Occurrence;
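-    // For example (illustrative): AccumulatedTime = 105 and Occurrence = 10
-    // give Integral = 10 and Fraction = ((105 % 10) * 10) / 10 = 5, printed
-    // below as "10.5(ns)".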
-
-    Str.append("%14" PRId64 ".%" PRId64 "(ns) %-11s", Integral, Fraction, " ");
-
-    for (u32 I = 0; I < ExtraIndent; ++I)
-      Str.append("%s", "  ");
-    Str.append("%s (%" PRId64 ")\n", Timers[HandleId].Name, Occurrence);
-
-    for (u32 I = 0; I < NumAllocatedTimers; ++I)
-      if (Timers[I].Nesting == HandleId)
-        printImpl(Str, I, ExtraIndent + 1);
-  }
-
-  // Instead of maintaining pages for timer registration, a static buffer is
-  // sufficient for most use cases in Scudo.
-  static constexpr u32 MaxNumberOfTimers = 50;
-  static constexpr u32 MaxLenOfTimerName = 50;
-  static constexpr u32 DefaultPrintingInterval = 100;
-
-  struct Record {
-    u64 AccumulatedTime = 0;
-    u64 Occurrence = 0;
-  };
-
-  struct TimerInfo {
-    char Name[MaxLenOfTimerName + 1];
-    u32 Nesting = MaxNumberOfTimers;
-  };
-
-  HybridMutex Mutex;
-  // The frequency of proactively dumping the timer statistics. For example, the
-  // default setting is to dump the statistics every 100 reported events.
-  u32 PrintingInterval GUARDED_BY(Mutex);
-  u64 NumEventsReported GUARDED_BY(Mutex) = 0;
-  u32 NumAllocatedTimers GUARDED_BY(Mutex) = 0;
-  TimerInfo Timers[MaxNumberOfTimers] GUARDED_BY(Mutex);
-  Record TimerRecords[MaxNumberOfTimers] GUARDED_BY(Mutex);
-};
-
-} // namespace scudo
-
-#endif // SCUDO_TIMING_H_
diff --git a/Telegram/ThirdParty/scudo/tools/compute_size_class_config.cpp b/Telegram/ThirdParty/scudo/tools/compute_size_class_config.cpp
deleted file mode 100644
index bcaa58349..000000000
--- a/Telegram/ThirdParty/scudo/tools/compute_size_class_config.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-//===-- compute_size_class_config.cpp -------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <algorithm>
-#include <vector>
-
-struct Alloc {
-  size_t size, count;
-};
-
-size_t measureWastage(const std::vector<Alloc> &allocs,
-                      const std::vector<size_t> &classes, size_t pageSize,
-                      size_t headerSize) {
-  size_t totalWastage = 0;
-  for (auto &a : allocs) {
-    size_t sizePlusHeader = a.size + headerSize;
-    size_t wastage = -1ull;
-    for (auto c : classes)
-      if (c >= sizePlusHeader && c - sizePlusHeader < wastage)
-        wastage = c - sizePlusHeader;
-    if (wastage == -1ull)
-      continue;
-    if (wastage > 2 * pageSize)
-      wastage = 2 * pageSize;
-    totalWastage += wastage * a.count;
-  }
-  return totalWastage;
-}
-
-void readAllocs(std::vector<Alloc> &allocs, const char *path) {
-  FILE *f = fopen(path, "r");
-  if (!f) {
-    fprintf(stderr, "compute_size_class_config: could not open %s: %s\n", path,
-            strerror(errno));
-    exit(1);
-  }
-
-  const char header[] = "<malloc version=\"scudo-1\">\n";
-  char buf[sizeof(header) - 1];
-  if (fread(buf, 1, sizeof(header) - 1, f) != sizeof(header) - 1 ||
-      memcmp(buf, header, sizeof(header) - 1) != 0) {
-    fprintf(stderr, "compute_size_class_config: invalid input format\n");
-    exit(1);
-  }
-
-  Alloc a;
-  while (fscanf(f, "<alloc size=\"%zu\" count=\"%zu\"/>\n", &a.size,
-                &a.count) == 2)
-    allocs.push_back(a);
-  fclose(f);
-}
-
-size_t log2Floor(size_t x) { return sizeof(long) * 8 - 1 - __builtin_clzl(x); }
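-// (Illustrative check: on LP64, log2Floor(16) = 64 - 1 - __builtin_clzl(16) =
-// 64 - 1 - 59 = 4.)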
-
-void usage() {
-  fprintf(stderr,
-          "usage: compute_size_class_config [-p pageSize] [-c largestClass] "
-          "[-h headerSize] [-n numClasses] [-b numBits] profile...\n");
-  exit(1);
-}
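-
-// An example invocation (illustrative; profile.xml stands for a file holding
-// malloc_info output in the "scudo-1" format that readAllocs() parses):
-//   compute_size_class_config -p 4096 -h 16 -n 32 -b 5 profile.xml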
-
-int main(int argc, char **argv) {
-  size_t pageSize = 4096;
-  size_t largestClass = 65552;
-  size_t headerSize = 16;
-  size_t numClasses = 32;
-  size_t numBits = 5;
-
-  std::vector<Alloc> allocs;
-  for (size_t i = 1; i != argc;) {
-    auto matchArg = [&](size_t &arg, const char *name) {
-      if (strcmp(argv[i], name) == 0) {
-        if (i + 1 != argc) {
-          arg = atoi(argv[i + 1]);
-          i += 2;
-        } else {
-          usage();
-        }
-        return true;
-      }
-      return false;
-    };
-    if (matchArg(pageSize, "-p") || matchArg(largestClass, "-c") ||
-        matchArg(headerSize, "-h") || matchArg(numClasses, "-n") ||
-        matchArg(numBits, "-b"))
-      continue;
-    readAllocs(allocs, argv[i]);
-    ++i;
-  }
-
-  if (allocs.empty())
-    usage();
-
-  std::vector<size_t> classes;
-  classes.push_back(largestClass);
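-  // Greedy construction: each iteration below tries every candidate class
-  // (multiples of 16 below largestClass), measures the total wastage with the
-  // candidate added, and permanently keeps the candidate that minimizes it.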
-  for (size_t i = 1; i != numClasses; ++i) {
-    size_t minWastage = -1ull;
-    size_t minWastageClass;
-    for (size_t newClass = 16; newClass != largestClass; newClass += 16) {
-      // Skip classes whose size has more than numBits significant bits,
-      // ignoring leading and trailing zero bits.
-      if (__builtin_ctzl(newClass - headerSize) +
-              __builtin_clzl(newClass - headerSize) <
-          sizeof(long) * 8 - numBits)
-        continue;
-
-      classes.push_back(newClass);
-      size_t newWastage = measureWastage(allocs, classes, pageSize, headerSize);
-      classes.pop_back();
-      if (newWastage < minWastage) {
-        minWastage = newWastage;
-        minWastageClass = newClass;
-      }
-    }
-    classes.push_back(minWastageClass);
-  }
-
-  std::sort(classes.begin(), classes.end());
-  size_t minSizeLog = log2Floor(headerSize);
-  size_t midSizeIndex = 0;
-  while (classes[midSizeIndex + 1] - classes[midSizeIndex] == (1 << minSizeLog))
-    midSizeIndex++;
-  size_t midSizeLog = log2Floor(classes[midSizeIndex] - headerSize);
-  size_t maxSizeLog = log2Floor(classes.back() - headerSize - 1) + 1;
-
-  printf(R"(// wastage = %zu
-
-struct MySizeClassConfig {
-  static const uptr NumBits = %zu;
-  static const uptr MinSizeLog = %zu;
-  static const uptr MidSizeLog = %zu;
-  static const uptr MaxSizeLog = %zu;
-  static const u16 MaxNumCachedHint = 14;
-  static const uptr MaxBytesCachedLog = 14;
-
-  static constexpr u32 Classes[] = {)",
-         measureWastage(allocs, classes, pageSize, headerSize), numBits,
-         minSizeLog, midSizeLog, maxSizeLog);
-  for (size_t i = 0; i != classes.size(); ++i) {
-    if ((i % 8) == 0)
-      printf("\n      ");
-    else
-      printf(" ");
-    printf("0x%05zx,", classes[i]);
-  }
-  printf(R"(
-  };
-  static const uptr SizeDelta = %zu;
-};
-)",
-         headerSize);
-}
diff --git a/Telegram/ThirdParty/scudo/trusty.cpp b/Telegram/ThirdParty/scudo/trusty.cpp
deleted file mode 100644
index 26b349c6e..000000000
--- a/Telegram/ThirdParty/scudo/trusty.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-//===-- trusty.cpp ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-#if SCUDO_TRUSTY
-
-#include "common.h"
-#include "mutex.h"
-#include "report_linux.h"
-#include "trusty.h"
-
-#include <errno.h>           // for errno
-#include <lk/err_ptr.h>      // for PTR_ERR and IS_ERR
-#include <stdio.h>           // for printf()
-#include <stdlib.h>          // for getenv()
-#include <sys/auxv.h>        // for getauxval()
-#include <time.h>            // for clock_gettime()
-#include <trusty_err.h>      // for lk_err_to_errno()
-#include <trusty_syscalls.h> // for _trusty_brk()
-#include <uapi/mm.h>         // for MMAP flags
-
-namespace scudo {
-
-uptr getPageSize() { return getauxval(AT_PAGESZ); }
-
-void NORETURN die() { abort(); }
-
-void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
-          UNUSED MapPlatformData *Data) {
-  uint32_t MmapFlags =
-      MMAP_FLAG_ANONYMOUS | MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE;
-
-  // If the MAP_NOACCESS flag is set, Scudo tries to reserve
-  // a memory region without mapping physical pages. This corresponds
-  // to MMAP_FLAG_NO_PHYSICAL in Trusty.
-  if (Flags & MAP_NOACCESS)
-    MmapFlags |= MMAP_FLAG_NO_PHYSICAL;
-  if (Addr)
-    MmapFlags |= MMAP_FLAG_FIXED_NOREPLACE;
-
-  if (Flags & MAP_MEMTAG)
-    MmapFlags |= MMAP_FLAG_PROT_MTE;
-
-  void *P = (void *)_trusty_mmap(Addr, Size, MmapFlags, 0);
-
-  if (IS_ERR(P)) {
-    errno = lk_err_to_errno(PTR_ERR(P));
-    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
-      reportMapError(Size);
-    return nullptr;
-  }
-
-  return P;
-}
-
-void unmap(UNUSED void *Addr, UNUSED uptr Size, UNUSED uptr Flags,
-           UNUSED MapPlatformData *Data) {
-  if (_trusty_munmap(Addr, Size) != 0)
-    reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
-}
-
-void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
-                         UNUSED MapPlatformData *Data) {}
-
-void releasePagesToOS(UNUSED uptr BaseAddress, UNUSED uptr Offset,
-                      UNUSED uptr Size, UNUSED MapPlatformData *Data) {}
-
-const char *getEnv(const char *Name) { return getenv(Name); }
-
-// All mutex operations are no-ops since Trusty doesn't currently support
-// threads.
-bool HybridMutex::tryLock() { return true; }
-
-void HybridMutex::lockSlow() {}
-
-void HybridMutex::unlock() {}
-
-void HybridMutex::assertHeldImpl() {}
-
-u64 getMonotonicTime() {
-  timespec TS;
-  clock_gettime(CLOCK_MONOTONIC, &TS);
-  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
-         static_cast<u64>(TS.tv_nsec);
-}
-
-u64 getMonotonicTimeFast() {
-#if defined(CLOCK_MONOTONIC_COARSE)
-  timespec TS;
-  clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
-  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
-         static_cast<u64>(TS.tv_nsec);
-#else
-  return getMonotonicTime();
-#endif
-}
-
-u32 getNumberOfCPUs() { return 0; }
-
-u32 getThreadID() { return 0; }
-
-bool getRandom(UNUSED void *Buffer, UNUSED uptr Length, UNUSED bool Blocking) {
-  return false;
-}
-
-void outputRaw(const char *Buffer) { printf("%s", Buffer); }
-
-void setAbortMessage(UNUSED const char *Message) {}
-
-} // namespace scudo
-
-#endif // SCUDO_TRUSTY
diff --git a/Telegram/ThirdParty/scudo/trusty.h b/Telegram/ThirdParty/scudo/trusty.h
deleted file mode 100644
index 50edd1c6f..000000000
--- a/Telegram/ThirdParty/scudo/trusty.h
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- trusty.h -----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TRUSTY_H_
-#define SCUDO_TRUSTY_H_
-
-#include "platform.h"
-
-#if SCUDO_TRUSTY
-
-namespace scudo {
-// MapPlatformData is unused on Trusty, so define it as a minimally sized
-// structure.
-struct MapPlatformData {};
-} // namespace scudo
-
-#endif // SCUDO_TRUSTY
-
-#endif // SCUDO_TRUSTY_H_
diff --git a/Telegram/ThirdParty/scudo/tsd.h b/Telegram/ThirdParty/scudo/tsd.h
deleted file mode 100644
index b2108a019..000000000
--- a/Telegram/ThirdParty/scudo/tsd.h
+++ /dev/null
@@ -1,90 +0,0 @@
-//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TSD_H_
-#define SCUDO_TSD_H_
-
-#include "atomic_helpers.h"
-#include "common.h"
-#include "mutex.h"
-#include "thread_annotations.h"
-
-#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
-#include <pthread.h>
-
-// With some build setups, this might still not be defined.
-#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
-#define PTHREAD_DESTRUCTOR_ITERATIONS 4
-#endif
-
-namespace scudo {
-
-template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
-  using ThisT = TSD<Allocator>;
-  u8 DestructorIterations = 0;
-
-  void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(DestructorIterations, 0U);
-    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
-    Instance->initCache(&Cache);
-    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
-  }
-
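-  // Note on Precedence: a failed tryLock() records a truncated monotonic
-  // timestamp of the first contended attempt, and any successful acquisition
-  // resets it to 0, so getPrecedence() can serve as a hint of how long this
-  // TSD has been contended.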
-  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
-    if (Mutex.tryLock()) {
-      atomic_store_relaxed(&Precedence, 0);
-      return true;
-    }
-    if (atomic_load_relaxed(&Precedence) == 0)
-      atomic_store_relaxed(&Precedence,
-                           static_cast<uptr>(getMonotonicTimeFast() >>
-                                             FIRST_32_SECOND_64(16, 0)));
-    return false;
-  }
-  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
-    atomic_store_relaxed(&Precedence, 0);
-    Mutex.lock();
-  }
-  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
-  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
-
-  void commitBack(Allocator *Instance) { Instance->commitBack(this); }
-
-  // As noted in the comments on `getCache()`, the TSD doesn't always need to
-  // be locked. In that case, we only bypass the check until TSDs are locked
-  // on all paths.
-  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
-    if (SCUDO_DEBUG && !BypassCheck)
-      Mutex.assertHeld();
-  }
-
-  // Ideally, we would want to assert that every operation on
-  // Cache/QuarantineCache is performed with `Mutex` acquired. However, the
-  // current architecture of TSD access doesn't cooperate well with the
-  // thread-safety analysis because of pointer aliasing, so for now we only
-  // add the assertion to the getters of Cache/QuarantineCache.
-  //
-  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but
-  // acquiring the TSD doesn't always require holding the lock. Add this
-  // assertion once the lock is always acquired.
-  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
-  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
-    return QuarantineCache;
-  }
-
-private:
-  HybridMutex Mutex;
-  atomic_uptr Precedence = {};
-
-  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
-  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
-};
-
-} // namespace scudo
-
-#endif // SCUDO_TSD_H_
diff --git a/Telegram/ThirdParty/scudo/tsd_exclusive.h b/Telegram/ThirdParty/scudo/tsd_exclusive.h
deleted file mode 100644
index 238367420..000000000
--- a/Telegram/ThirdParty/scudo/tsd_exclusive.h
+++ /dev/null
@@ -1,178 +0,0 @@
-//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TSD_EXCLUSIVE_H_
-#define SCUDO_TSD_EXCLUSIVE_H_
-
-#include "tsd.h"
-
-#include "string_utils.h"
-
-namespace scudo {
-
-struct ThreadState {
-  bool DisableMemInit : 1;
-  enum : unsigned {
-    NotInitialized = 0,
-    Initialized,
-    TornDown,
-  } InitState : 2;
-};
-
-template <class Allocator> void teardownThread(void *Ptr);
-
-template <class Allocator> struct TSDRegistryExT {
-  void init(Allocator *Instance) REQUIRES(Mutex) {
-    DCHECK(!Initialized);
-    Instance->init();
-    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
-    FallbackTSD.init(Instance);
-    Initialized = true;
-  }
-
-  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    if (LIKELY(Initialized))
-      return;
-    init(Instance); // Sets Initialized.
-  }
-
-  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
-    DCHECK(Instance);
-    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
-      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
-                Instance);
-      ThreadTSD.commitBack(Instance);
-      ThreadTSD = {};
-    }
-    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
-    PThreadKey = {};
-    FallbackTSD.commitBack(Instance);
-    FallbackTSD = {};
-    State = {};
-    ScopedLock L(Mutex);
-    Initialized = false;
-  }
-
-  void drainCaches(Allocator *Instance) {
-    // We don't have a way to iterate over all thread-local `ThreadTSD`s, so we
-    // simply drain the current thread's `ThreadTSD` and the `FallbackTSD`.
-    Instance->drainCache(&ThreadTSD);
-    FallbackTSD.lock();
-    Instance->drainCache(&FallbackTSD);
-    FallbackTSD.unlock();
-  }
-
-  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
-    if (LIKELY(State.InitState != ThreadState::NotInitialized))
-      return;
-    initThread(Instance, MinimalInit);
-  }
-
-  // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
-  // embedding the logic into TSD or always locking the TSD. It will enable us
-  // to properly mark thread annotations here and to add proper runtime
-  // assertions in the member functions of TSD, e.g., assert that the lock is
-  // acquired before calling TSD::commitBack().
-  ALWAYS_INLINE TSD<Allocator> *
-  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
-    if (LIKELY(State.InitState == ThreadState::Initialized &&
-               !atomic_load(&Disabled, memory_order_acquire))) {
-      *UnlockRequired = false;
-      return &ThreadTSD;
-    }
-    FallbackTSD.lock();
-    *UnlockRequired = true;
-    return &FallbackTSD;
-  }
-
-  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
-  // and force all threads to attempt to use it instead of their local one.
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    Mutex.lock();
-    FallbackTSD.lock();
-    atomic_store(&Disabled, 1U, memory_order_release);
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    atomic_store(&Disabled, 0U, memory_order_release);
-    FallbackTSD.unlock();
-    Mutex.unlock();
-  }
-
-  bool setOption(Option O, sptr Value) {
-    if (O == Option::ThreadDisableMemInit)
-      State.DisableMemInit = Value;
-    if (O == Option::MaxTSDsCount)
-      return false;
-    return true;
-  }
-
-  bool getDisableMemInit() { return State.DisableMemInit; }
-
-  void getStats(ScopedString *Str) {
-    // We don't have a way to iterate over all thread-local `ThreadTSD`s.
-    // Printing only the current thread's `ThreadTSD` could be misleading, so
-    // we just skip it.
-    Str->append("Exclusive TSDs don't support iterating each TSD\n");
-  }
-
-private:
-  // Using minimal initialization allows for global initialization while
-  // keeping the thread-specific structure untouched. The fallback structure
-  // will be used instead.
-  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
-    initOnceMaybe(Instance);
-    if (UNLIKELY(MinimalInit))
-      return;
-    CHECK_EQ(
-        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
-    ThreadTSD.init(Instance);
-    State.InitState = ThreadState::Initialized;
-    Instance->callPostInitCallback();
-  }
-
-  pthread_key_t PThreadKey = {};
-  bool Initialized GUARDED_BY(Mutex) = false;
-  atomic_u8 Disabled = {};
-  TSD<Allocator> FallbackTSD;
-  HybridMutex Mutex;
-  static thread_local ThreadState State;
-  static thread_local TSD<Allocator> ThreadTSD;
-
-  friend void teardownThread<Allocator>(void *Ptr);
-};
-
-template <class Allocator>
-thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
-template <class Allocator>
-thread_local ThreadState TSDRegistryExT<Allocator>::State;
-
-template <class Allocator>
-void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
-  typedef TSDRegistryExT<Allocator> TSDRegistryT;
-  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
-  // The glibc POSIX thread-local-storage deallocation routine calls
-  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
-  // We want to be called last since other destructors might call free and the
-  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
-  // quarantine and swallowing the cache.
-  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
-    TSDRegistryT::ThreadTSD.DestructorIterations--;
-    // If pthread_setspecific fails, we will go ahead with the teardown.
-    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
-                                   Ptr) == 0))
-      return;
-  }
-  TSDRegistryT::ThreadTSD.commitBack(Instance);
-  TSDRegistryT::State.InitState = ThreadState::TornDown;
-}
-
-} // namespace scudo
-
-#endif // SCUDO_TSD_EXCLUSIVE_H_
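
The destructor-deferral trick in `teardownThread` above is plain POSIX TLS behavior and can be demonstrated standalone. A minimal sketch, assuming a glibc-like libc that makes multiple destructor passes; the counter value 3 stands in for `DestructorIterations`:

```cpp
#include <cstdint>
#include <cstdio>
#include <pthread.h>

static pthread_key_t Key;

// The TLS destructor re-registers its value so the libc calls it again on
// the next destructor pass, letting the real work run after most other
// destructors (which might still call free) have finished.
static void teardown(void *Ptr) {
  uintptr_t Remaining = reinterpret_cast<uintptr_t>(Ptr);
  if (Remaining > 1 &&
      pthread_setspecific(Key, reinterpret_cast<void *>(Remaining - 1)) == 0)
    return; // deferred: we will be invoked again
  std::printf("final pass: drain caches now\n");
}

static void *threadMain(void *) {
  // Defer the real teardown for two extra destructor passes.
  pthread_setspecific(Key, reinterpret_cast<void *>(3));
  return nullptr;
}

int main() {
  pthread_key_create(&Key, teardown);
  pthread_t T;
  pthread_create(&T, nullptr, threadMain, nullptr);
  pthread_join(T, nullptr);
  pthread_key_delete(Key);
}
```
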
diff --git a/Telegram/ThirdParty/scudo/tsd_shared.h b/Telegram/ThirdParty/scudo/tsd_shared.h
deleted file mode 100644
index 1bca578ee..000000000
--- a/Telegram/ThirdParty/scudo/tsd_shared.h
+++ /dev/null
@@ -1,252 +0,0 @@
-//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TSD_SHARED_H_
-#define SCUDO_TSD_SHARED_H_
-
-#include "tsd.h"
-
-#include "string_utils.h"
-
-#if SCUDO_HAS_PLATFORM_TLS_SLOT
-// This is a platform-provided header that needs to be on the include path when
-// Scudo is compiled. It must declare a function with the prototype:
-//   uintptr_t *getPlatformAllocatorTlsSlot()
-// that returns the address of a thread-local word of storage reserved for
-// Scudo, which must be zero-initialized in newly created threads.
-#include "scudo_platform_tls_slot.h"
-#endif
-
-namespace scudo {
-
-template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
-struct TSDRegistrySharedT {
-  void init(Allocator *Instance) REQUIRES(Mutex) {
-    DCHECK(!Initialized);
-    Instance->init();
-    for (u32 I = 0; I < TSDsArraySize; I++)
-      TSDs[I].init(Instance);
-    const u32 NumberOfCPUs = getNumberOfCPUs();
-    setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
-                                        : Min(NumberOfCPUs, DefaultTSDCount));
-    Initialized = true;
-  }
-
-  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
-    if (LIKELY(Initialized))
-      return;
-    init(Instance); // Sets Initialized.
-  }
-
-  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
-    for (u32 I = 0; I < TSDsArraySize; I++) {
-      TSDs[I].commitBack(Instance);
-      TSDs[I] = {};
-    }
-    setCurrentTSD(nullptr);
-    ScopedLock L(Mutex);
-    Initialized = false;
-  }
-
-  void drainCaches(Allocator *Instance) {
-    ScopedLock L(MutexTSDs);
-    for (uptr I = 0; I < NumberOfTSDs; ++I) {
-      TSDs[I].lock();
-      Instance->drainCache(&TSDs[I]);
-      TSDs[I].unlock();
-    }
-  }
-
-  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
-                                     UNUSED bool MinimalInit) {
-    if (LIKELY(getCurrentTSD()))
-      return;
-    initThread(Instance);
-  }
-
-  // TSDs is an array of locks, which is not supported for marking
-  // thread-safety capability.
-  ALWAYS_INLINE TSD<Allocator> *
-  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
-    TSD<Allocator> *TSD = getCurrentTSD();
-    DCHECK(TSD);
-    *UnlockRequired = true;
-    // Try to lock the currently associated context.
-    if (TSD->tryLock())
-      return TSD;
-    // If that fails, go down the slow path.
-    if (TSDsArraySize == 1U) {
-      // Only 1 TSD, no need to go any further.
-      // The compiler will optimize this one way or the other.
-      TSD->lock();
-      return TSD;
-    }
-    return getTSDAndLockSlow(TSD);
-  }
-
-  void disable() NO_THREAD_SAFETY_ANALYSIS {
-    Mutex.lock();
-    for (u32 I = 0; I < TSDsArraySize; I++)
-      TSDs[I].lock();
-  }
-
-  void enable() NO_THREAD_SAFETY_ANALYSIS {
-    for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
-      TSDs[I].unlock();
-    Mutex.unlock();
-  }
-
-  bool setOption(Option O, sptr Value) {
-    if (O == Option::MaxTSDsCount)
-      return setNumberOfTSDs(static_cast<u32>(Value));
-    if (O == Option::ThreadDisableMemInit)
-      setDisableMemInit(Value);
-    // Not supported by the TSD Registry, but not an error either.
-    return true;
-  }
-
-  bool getDisableMemInit() const { return *getTlsPtr() & 1; }
-
-  void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
-    ScopedLock L(MutexTSDs);
-
-    Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
-                TSDsArraySize);
-    for (uptr I = 0; I < NumberOfTSDs; ++I) {
-      TSDs[I].lock();
-      // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
-      // thread annotations. However, given the TSD is only locked on the
-      // shared path, do the assertion in a separate path to avoid confusing
-      // the analyzer.
-      TSDs[I].assertLocked(/*BypassCheck=*/true);
-      Str->append("  Shared TSD[%zu]:\n", I);
-      TSDs[I].getCache().getStats(Str);
-      TSDs[I].unlock();
-    }
-  }
-
-private:
-  ALWAYS_INLINE uptr *getTlsPtr() const {
-#if SCUDO_HAS_PLATFORM_TLS_SLOT
-    return reinterpret_cast<uptr *>(getPlatformAllocatorTlsSlot());
-#else
-    static thread_local uptr ThreadTSD;
-    return &ThreadTSD;
-#endif
-  }
-
-  static_assert(alignof(TSD<Allocator>) >= 2, "");
-
-  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
-    *getTlsPtr() &= 1;
-    *getTlsPtr() |= reinterpret_cast<uptr>(CurrentTSD);
-  }
-
-  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
-    return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
-  }
-
-  bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
-    ScopedLock L(MutexTSDs);
-    if (N < NumberOfTSDs)
-      return false;
-    if (N > TSDsArraySize)
-      N = TSDsArraySize;
-    NumberOfTSDs = N;
-    NumberOfCoPrimes = 0;
-    // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
-    // array of TSDs in a random order. For details, see:
-    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
-    for (u32 I = 0; I < N; I++) {
-      u32 A = I + 1;
-      u32 B = N;
-      // Find the GCD between I + 1 and N. If 1, they are coprimes.
-      while (B != 0) {
-        const u32 T = A;
-        A = B;
-        B = T % B;
-      }
-      if (A == 1)
-        CoPrimes[NumberOfCoPrimes++] = I + 1;
-    }
-    return true;
-  }
-
-  void setDisableMemInit(bool B) {
-    *getTlsPtr() &= ~1ULL;
-    *getTlsPtr() |= B;
-  }
-
-  NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
-    initOnceMaybe(Instance);
-    // Initial context assignment is done in a plain round-robin fashion.
-    const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
-    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
-    Instance->callPostInitCallback();
-  }
-
-  // TSDs is an array of locks, which is not supported for marking
-  // thread-safety capability.
-  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
-      EXCLUDES(MutexTSDs) {
-    // Use the Precedence of the current TSD as our random seed. Since we are
-    // in the slow path, it means that tryLock failed, and as a result it's
-    // very likely that said Precedence is non-zero.
-    const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
-    u32 N, Inc;
-    {
-      ScopedLock L(MutexTSDs);
-      N = NumberOfTSDs;
-      DCHECK_NE(NumberOfCoPrimes, 0U);
-      Inc = CoPrimes[R % NumberOfCoPrimes];
-    }
-    if (N > 1U) {
-      u32 Index = R % N;
-      uptr LowestPrecedence = UINTPTR_MAX;
-      TSD<Allocator> *CandidateTSD = nullptr;
-      // Go randomly through at most 4 contexts and find a candidate.
-      for (u32 I = 0; I < Min(4U, N); I++) {
-        if (TSDs[Index].tryLock()) {
-          setCurrentTSD(&TSDs[Index]);
-          return &TSDs[Index];
-        }
-        const uptr Precedence = TSDs[Index].getPrecedence();
-        // A 0 precedence here means another thread just locked this TSD.
-        if (Precedence && Precedence < LowestPrecedence) {
-          CandidateTSD = &TSDs[Index];
-          LowestPrecedence = Precedence;
-        }
-        Index += Inc;
-        if (Index >= N)
-          Index -= N;
-      }
-      if (CandidateTSD) {
-        CandidateTSD->lock();
-        setCurrentTSD(CandidateTSD);
-        return CandidateTSD;
-      }
-    }
-    // Last resort, stick with the current one.
-    CurrentTSD->lock();
-    return CurrentTSD;
-  }
-
-  atomic_u32 CurrentIndex = {};
-  u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
-  u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
-  u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
-  bool Initialized GUARDED_BY(Mutex) = false;
-  HybridMutex Mutex;
-  HybridMutex MutexTSDs;
-  TSD<Allocator> TSDs[TSDsArraySize];
-};
-
-} // namespace scudo
-
-#endif // SCUDO_TSD_SHARED_H_
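
The CoPrimes machinery deleted above implements the random-order array walk from the linked Lemire post: stepping through an array of size N by an increment coprime with N visits every index exactly once. A self-contained illustration (C++17 for std::gcd; the start index and choice of coprime are arbitrary, where scudo picks them pseudo-randomly):

```cpp
#include <cstdio>
#include <numeric> // std::gcd

int main() {
  const unsigned N = 8; // stand-in for NumberOfTSDs
  // Collect a coprime of N, as setNumberOfTSDs does with its GCD loop.
  unsigned Inc = 1;
  for (unsigned I = 1; I <= N; I++)
    if (std::gcd(I, N) == 1)
      Inc = I; // any coprime works; this keeps the last one (7 for N=8)
  // Walk all indices exactly once, as getTSDAndLockSlow does.
  unsigned Index = 3 % N; // arbitrary starting point ("random seed")
  for (unsigned Step = 0; Step < N; Step++) {
    std::printf("%u ", Index); // prints a permutation of 0..N-1
    Index += Inc;
    if (Index >= N)
      Index -= N;
  }
  std::printf("\n");
}
```
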
diff --git a/Telegram/ThirdParty/scudo/vector.h b/Telegram/ThirdParty/scudo/vector.h
deleted file mode 100644
index c0f1ba0ed..000000000
--- a/Telegram/ThirdParty/scudo/vector.h
+++ /dev/null
@@ -1,131 +0,0 @@
-//===-- vector.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_VECTOR_H_
-#define SCUDO_VECTOR_H_
-
-#include "mem_map.h"
-
-#include <string.h>
-
-namespace scudo {
-
-// A low-level vector backed by the platform map primitive. It stores the
-// contents inline up to a fixed capacity, or in an external memory buffer if
-// it grows bigger than that. May incur a significant memory overhead for
-// small vectors. The current implementation supports only POD types.
-//
-// NOTE: This class is not meant to be used directly, use Vector<T> instead.
-template <typename T> class VectorNoCtor {
-public:
-  T &operator[](uptr I) {
-    DCHECK_LT(I, Size);
-    return Data[I];
-  }
-  const T &operator[](uptr I) const {
-    DCHECK_LT(I, Size);
-    return Data[I];
-  }
-  void push_back(const T &Element) {
-    DCHECK_LE(Size, capacity());
-    if (Size == capacity()) {
-      const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
-      reallocate(NewCapacity);
-    }
-    memcpy(&Data[Size++], &Element, sizeof(T));
-  }
-  T &back() {
-    DCHECK_GT(Size, 0);
-    return Data[Size - 1];
-  }
-  void pop_back() {
-    DCHECK_GT(Size, 0);
-    Size--;
-  }
-  uptr size() const { return Size; }
-  const T *data() const { return Data; }
-  T *data() { return Data; }
-  constexpr uptr capacity() const { return CapacityBytes / sizeof(T); }
-  void reserve(uptr NewSize) {
-    // Never downsize internal buffer.
-    if (NewSize > capacity())
-      reallocate(NewSize);
-  }
-  void resize(uptr NewSize) {
-    if (NewSize > Size) {
-      reserve(NewSize);
-      memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
-    }
-    Size = NewSize;
-  }
-
-  void clear() { Size = 0; }
-  bool empty() const { return size() == 0; }
-
-  const T *begin() const { return data(); }
-  T *begin() { return data(); }
-  const T *end() const { return data() + size(); }
-  T *end() { return data() + size(); }
-
-protected:
-  constexpr void init(uptr InitialCapacity = 0) {
-    Data = &LocalData[0];
-    CapacityBytes = sizeof(LocalData);
-    if (InitialCapacity > capacity())
-      reserve(InitialCapacity);
-  }
-  void destroy() {
-    if (Data != &LocalData[0])
-      ExternalBuffer.unmap(ExternalBuffer.getBase(),
-                           ExternalBuffer.getCapacity());
-  }
-
-private:
-  void reallocate(uptr NewCapacity) {
-    DCHECK_GT(NewCapacity, 0);
-    DCHECK_LE(Size, NewCapacity);
-
-    MemMapT NewExternalBuffer;
-    NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
-    NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector");
-    T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());
-
-    memcpy(NewExternalData, Data, Size * sizeof(T));
-    destroy();
-
-    Data = NewExternalData;
-    CapacityBytes = NewCapacity;
-    ExternalBuffer = NewExternalBuffer;
-  }
-
-  T *Data = nullptr;
-  uptr CapacityBytes = 0;
-  uptr Size = 0;
-
-  T LocalData[256 / sizeof(T)] = {};
-  MemMapT ExternalBuffer;
-};
-
-template <typename T> class Vector : public VectorNoCtor<T> {
-public:
-  constexpr Vector() { VectorNoCtor<T>::init(); }
-  explicit Vector(uptr Count) {
-    VectorNoCtor<T>::init(Count);
-    this->resize(Count);
-  }
-  ~Vector() { VectorNoCtor<T>::destroy(); }
-  // Disallow copies and moves.
-  Vector(const Vector &) = delete;
-  Vector &operator=(const Vector &) = delete;
-  Vector(Vector &&) = delete;
-  Vector &operator=(Vector &&) = delete;
-};
-
-} // namespace scudo
-
-#endif // SCUDO_VECTOR_H_
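
The growth policy of `VectorNoCtor::push_back` above is easy to trace in isolation: contents stay in the 256-byte inline buffer until it fills, then the element capacity jumps to the next power of two (the real code additionally rounds the byte size up to a whole page before mapping). A small sketch with a hand-rolled `roundUpPowerOfTwo`:

```cpp
#include <cstdio>

// Next power of two >= X (the deleted header has its own bit-twiddling
// version; a loop is enough for illustration).
static unsigned long roundUpPowerOfTwo(unsigned long X) {
  unsigned long P = 1;
  while (P < X)
    P <<= 1;
  return P;
}

int main() {
  const unsigned long InlineCapacity = 256 / sizeof(int); // LocalData for T=int
  unsigned long Capacity = InlineCapacity;
  for (unsigned long Size = 0; Size <= 300; Size++)
    if (Size == Capacity) { // push_back at full capacity triggers reallocate
      Capacity = roundUpPowerOfTwo(Size + 1);
      std::printf("push_back at size %lu grows capacity to %lu\n", Size,
                  Capacity);
    }
}
```
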
diff --git a/Telegram/ThirdParty/scudo/wrappers_c.cpp b/Telegram/ThirdParty/scudo/wrappers_c.cpp
deleted file mode 100644
index 60014a0f6..000000000
--- a/Telegram/ThirdParty/scudo/wrappers_c.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- wrappers_c.cpp ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-// Skip this compilation unit if compiled as part of Bionic.
-#if !SCUDO_ANDROID || !_BIONIC
-
-#include "allocator_config.h"
-#include "internal_defs.h"
-#include "platform.h"
-#include "scudo/interface.h"
-#include "wrappers_c.h"
-#include "wrappers_c_checks.h"
-
-#include <stdint.h>
-#include <stdio.h>
-
-#define SCUDO_PREFIX(name) name
-#define SCUDO_ALLOCATOR Allocator
-
-// Export the static allocator so that the C++ wrappers can access it.
-// Technically we could have a completely separate heap for C & C++, but in
-// reality the amount of cross-pollination between the two is staggering.
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;
-
-#include "wrappers_c.inc"
-
-#undef SCUDO_ALLOCATOR
-#undef SCUDO_PREFIX
-
-extern "C" INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
-
-#endif // !SCUDO_ANDROID || !_BIONIC
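
The `INTERFACE WEAK` qualifiers on the wrappers above rely on standard weak-symbol interposition: the weak definition is a default that any strong definition in the final link silently replaces. A toy example (GCC/Clang attribute syntax; the function name is made up):

```cpp
#include <cstdio>

// Weak default, as the scudo wrappers declare with INTERFACE WEAK. If any
// other translation unit defines a strong print_heap_stats, the linker
// picks that one instead of this body.
extern "C" __attribute__((weak)) void print_heap_stats(void) {
  std::printf("weak default: no stats\n");
}

int main() {
  print_heap_stats(); // runs the weak default unless overridden at link time
}
```
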
diff --git a/Telegram/ThirdParty/scudo/wrappers_c.h b/Telegram/ThirdParty/scudo/wrappers_c.h
deleted file mode 100644
index 08dc679b3..000000000
--- a/Telegram/ThirdParty/scudo/wrappers_c.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_WRAPPERS_C_H_
-#define SCUDO_WRAPPERS_C_H_
-
-#include "platform.h"
-#include "stats.h"
-
-// Bionic's struct mallinfo consists of size_t (mallinfo(3) uses int).
-#if SCUDO_ANDROID
-typedef size_t __scudo_mallinfo_data_t;
-#else
-typedef int __scudo_mallinfo_data_t;
-#endif
-
-struct __scudo_mallinfo {
-  __scudo_mallinfo_data_t arena;
-  __scudo_mallinfo_data_t ordblks;
-  __scudo_mallinfo_data_t smblks;
-  __scudo_mallinfo_data_t hblks;
-  __scudo_mallinfo_data_t hblkhd;
-  __scudo_mallinfo_data_t usmblks;
-  __scudo_mallinfo_data_t fsmblks;
-  __scudo_mallinfo_data_t uordblks;
-  __scudo_mallinfo_data_t fordblks;
-  __scudo_mallinfo_data_t keepcost;
-};
-
-struct __scudo_mallinfo2 {
-  size_t arena;
-  size_t ordblks;
-  size_t smblks;
-  size_t hblks;
-  size_t hblkhd;
-  size_t usmblks;
-  size_t fsmblks;
-  size_t uordblks;
-  size_t fordblks;
-  size_t keepcost;
-};
-
-// Android sometimes includes malloc.h no matter what, which leads to
-// conflicting return types for mallinfo() if we use our own structure. So if
-// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
-#if STRUCT_MALLINFO_DECLARED
-#define SCUDO_MALLINFO mallinfo
-#else
-#define SCUDO_MALLINFO __scudo_mallinfo
-#endif
-
-#if !SCUDO_ANDROID || !_BIONIC
-extern "C" void malloc_postinit();
-extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
-#endif
-
-#endif // SCUDO_WRAPPERS_C_H_
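
The reason `__scudo_mallinfo2` duplicates `__scudo_mallinfo` with `size_t` fields is the classic mallinfo(3) limitation: `int` counters wrap once a statistic passes INT_MAX. A two-line demonstration (the wrapped value is implementation-defined):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  std::size_t Mapped = static_cast<std::size_t>(3) << 30; // 3 GiB of mappings
  int Legacy = static_cast<int>(Mapped); // mallinfo-style int field wraps
  std::printf("size_t field: %zu, int field: %d\n", Mapped, Legacy);
}
```
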
diff --git a/Telegram/ThirdParty/scudo/wrappers_c.inc b/Telegram/ThirdParty/scudo/wrappers_c.inc
deleted file mode 100644
index 56d8ef201..000000000
--- a/Telegram/ThirdParty/scudo/wrappers_c.inc
+++ /dev/null
@@ -1,374 +0,0 @@
-//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_PREFIX
-#error "Define SCUDO_PREFIX prior to including this file!"
-#endif
-
-// malloc-type functions have to be aligned to std::max_align_t. This is
-// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
-// do not have to abide by the same requirement.
-#ifndef SCUDO_MALLOC_ALIGNMENT
-#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
-#endif
-
-static void reportAllocation(void *ptr, size_t size) {
-  if (SCUDO_ENABLE_HOOKS)
-    if (__scudo_allocate_hook && ptr)
-      __scudo_allocate_hook(ptr, size);
-}
-static void reportDeallocation(void *ptr) {
-  if (SCUDO_ENABLE_HOOKS)
-    if (__scudo_deallocate_hook)
-      __scudo_deallocate_hook(ptr);
-}
-static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
-  DCHECK_NE(new_ptr, nullptr);
-
-  if (SCUDO_ENABLE_HOOKS) {
-    if (__scudo_realloc_allocate_hook)
-      __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
-    else if (__scudo_allocate_hook)
-      __scudo_allocate_hook(new_ptr, size);
-  }
-}
-static void reportReallocDeallocation(void *old_ptr) {
-  if (SCUDO_ENABLE_HOOKS) {
-    if (__scudo_realloc_deallocate_hook)
-      __scudo_realloc_deallocate_hook(old_ptr);
-    else if (__scudo_deallocate_hook)
-      __scudo_deallocate_hook(old_ptr);
-  }
-}
-
-extern "C" {
-
-INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
-  scudo::uptr Product;
-  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
-    if (SCUDO_ALLOCATOR.canReturnNull()) {
-      errno = ENOMEM;
-      return nullptr;
-    }
-    scudo::reportCallocOverflow(nmemb, size);
-  }
-  void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
-                                       SCUDO_MALLOC_ALIGNMENT, true);
-  reportAllocation(Ptr, Product);
-  return scudo::setErrnoOnNull(Ptr);
-}
-
-INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
-  reportDeallocation(ptr);
-  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
-}
-
-INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
-  struct SCUDO_MALLINFO Info = {};
-  scudo::StatCounters Stats;
-  SCUDO_ALLOCATOR.getStats(Stats);
-  // Space allocated in mmapped regions (bytes)
-  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
-  // Maximum total allocated space (bytes)
-  Info.usmblks = Info.hblkhd;
-  // Space in freed fastbin blocks (bytes)
-  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
-  // Total allocated space (bytes)
-  Info.uordblks =
-      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
-  // Total free space (bytes)
-  Info.fordblks = Info.fsmblks;
-  return Info;
-}
-
-// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
-#if !SCUDO_ANDROID
-INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
-  struct __scudo_mallinfo2 Info = {};
-  scudo::StatCounters Stats;
-  SCUDO_ALLOCATOR.getStats(Stats);
-  // Space allocated in mmapped regions (bytes)
-  Info.hblkhd = Stats[scudo::StatMapped];
-  // Maximum total allocated space (bytes)
-  Info.usmblks = Info.hblkhd;
-  // Space in freed fastbin blocks (bytes)
-  Info.fsmblks = Stats[scudo::StatFree];
-  // Total allocated space (bytes)
-  Info.uordblks = Stats[scudo::StatAllocated];
-  // Total free space (bytes)
-  Info.fordblks = Info.fsmblks;
-  return Info;
-}
-#endif
-
-INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
-  void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
-                                       SCUDO_MALLOC_ALIGNMENT);
-  reportAllocation(Ptr, size);
-  return scudo::setErrnoOnNull(Ptr);
-}
-
-#if SCUDO_ANDROID
-INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
-#else
-INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
-#endif
-  return SCUDO_ALLOCATOR.getUsableSize(ptr);
-}
-
-INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
-  // Android rounds up the alignment to a power of two if it isn't one.
-  if (SCUDO_ANDROID) {
-    if (UNLIKELY(!alignment)) {
-      alignment = 1U;
-    } else {
-      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
-        alignment = scudo::roundUpPowerOfTwo(alignment);
-    }
-  } else {
-    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
-      if (SCUDO_ALLOCATOR.canReturnNull()) {
-        errno = EINVAL;
-        return nullptr;
-      }
-      scudo::reportAlignmentNotPowerOfTwo(alignment);
-    }
-  }
-  void *Ptr =
-      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-
-INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
-                                                size_t size) {
-  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
-    if (!SCUDO_ALLOCATOR.canReturnNull())
-      scudo::reportInvalidPosixMemalignAlignment(alignment);
-    return EINVAL;
-  }
-  void *Ptr =
-      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
-  if (UNLIKELY(!Ptr))
-    return ENOMEM;
-  reportAllocation(Ptr, size);
-
-  *memptr = Ptr;
-  return 0;
-}
-
-INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
-    if (SCUDO_ALLOCATOR.canReturnNull()) {
-      errno = ENOMEM;
-      return nullptr;
-    }
-    scudo::reportPvallocOverflow(size);
-  }
-  // pvalloc(0) should allocate one page.
-  void *Ptr =
-      SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
-                               scudo::Chunk::Origin::Memalign, PageSize);
-  reportAllocation(Ptr, scudo::roundUp(size, PageSize));
-
-  return scudo::setErrnoOnNull(Ptr);
-}
-
-INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
-  if (!ptr) {
-    void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
-                                         SCUDO_MALLOC_ALIGNMENT);
-    reportAllocation(Ptr, size);
-    return scudo::setErrnoOnNull(Ptr);
-  }
-  if (size == 0) {
-    reportDeallocation(ptr);
-    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
-    return nullptr;
-  }
-
-  // Given that the reporting of deallocation and allocation is not atomic, we
-  // always pretend the old pointer will be released so that the user doesn't
-  // need to worry about a false double-use report from the hooks' viewpoint.
-  //
-  // For example, assume that `realloc` releases the old pointer and allocates a
-  // new pointer. Before the reporting of both operations has been done, another
-  // thread may get the old pointer from `malloc`. It may be misinterpreted as
-  // double-use if it's not handled properly on the hook side.
-  reportReallocDeallocation(ptr);
-  void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
-  if (NewPtr != nullptr) {
-    // Note that even if NewPtr == ptr, the size has changed. We still need to
-    // report the new size.
-    reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
-  } else {
-    // If `realloc` fails, the old pointer is not released. Report the old
-    // pointer as allocated again.
-    reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
-                            SCUDO_ALLOCATOR.getAllocSize(ptr));
-  }
-
-  return scudo::setErrnoOnNull(NewPtr);
-}
-
-INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
-  void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
-                                       scudo::getPageSizeCached());
-  reportAllocation(Ptr, size);
-
-  return scudo::setErrnoOnNull(Ptr);
-}
-
-INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
-    uintptr_t base, size_t size,
-    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
-  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
-  return 0;
-}
-
-INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
-
-INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
-  SCUDO_ALLOCATOR.disable();
-}
-
-void SCUDO_PREFIX(malloc_postinit)() {
-  SCUDO_ALLOCATOR.initGwpAsan();
-  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
-                 SCUDO_PREFIX(malloc_enable));
-}
-
-INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
-  if (param == M_DECAY_TIME) {
-    if (SCUDO_ANDROID) {
-      if (value == 0) {
-        // Will set the release values to their minimum values.
-        value = INT32_MIN;
-      } else {
-        // Will set the release values to their maximum values.
-        value = INT32_MAX;
-      }
-    }
-
-    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
-                              static_cast<scudo::sptr>(value));
-    return 1;
-  } else if (param == M_PURGE) {
-    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
-    return 1;
-  } else if (param == M_PURGE_ALL) {
-    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
-    return 1;
-  } else if (param == M_LOG_STATS) {
-    SCUDO_ALLOCATOR.printStats();
-    SCUDO_ALLOCATOR.printFragmentationInfo();
-    return 1;
-  } else {
-    scudo::Option option;
-    switch (param) {
-    case M_MEMTAG_TUNING:
-      option = scudo::Option::MemtagTuning;
-      break;
-    case M_THREAD_DISABLE_MEM_INIT:
-      option = scudo::Option::ThreadDisableMemInit;
-      break;
-    case M_CACHE_COUNT_MAX:
-      option = scudo::Option::MaxCacheEntriesCount;
-      break;
-    case M_CACHE_SIZE_MAX:
-      option = scudo::Option::MaxCacheEntrySize;
-      break;
-    case M_TSDS_COUNT_MAX:
-      option = scudo::Option::MaxTSDsCount;
-      break;
-    default:
-      return 0;
-    }
-    return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
-  }
-}
-
-INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
-                                                 size_t size) {
-  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
-    if (SCUDO_ALLOCATOR.canReturnNull()) {
-      errno = EINVAL;
-      return nullptr;
-    }
-    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
-  }
-
-  void *Ptr =
-      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
-  reportAllocation(Ptr, size);
-
-  return scudo::setErrnoOnNull(Ptr);
-}
-
-INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
-  const scudo::uptr max_size =
-      decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
-  auto *sizes = static_cast<scudo::uptr *>(
-      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
-  auto callback = [](uintptr_t, size_t size, void *arg) {
-    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
-    if (size < max_size)
-      sizes[size]++;
-  };
-
-  SCUDO_ALLOCATOR.disable();
-  SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
-  SCUDO_ALLOCATOR.enable();
-
-  fputs("<malloc version=\"scudo-1\">\n", stream);
-  for (scudo::uptr i = 0; i != max_size; ++i)
-    if (sizes[i])
-      fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
-  fputs("</malloc>\n", stream);
-  SCUDO_PREFIX(free)(sizes);
-  return 0;
-}
-
-// Disable memory tagging for the heap. The caller must disable memory tag
-// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
-// function, and may not re-enable them after calling the function.
-INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
-  SCUDO_ALLOCATOR.disableMemoryTagging();
-}
-
-// Sets whether scudo records stack traces and other metadata for allocations
-// and deallocations. This function only has an effect if the allocator and
-// hardware support memory tagging.
-INTERFACE WEAK void
-SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
-  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
-}
-
-// Sets whether scudo zero-initializes all allocated memory.
-INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
-  SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
-                                                : scudo::NoFill);
-}
-
-// Sets whether scudo pattern-initializes all allocated memory.
-INTERFACE WEAK void
-SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
-  SCUDO_ALLOCATOR.setFillContents(
-      pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
-}
-
-// Sets whether scudo adds a small amount of slack at the end of large
-// allocations, before the guard page. This can be enabled to work around buggy
-// applications that read a few bytes past the end of their allocation.
-INTERFACE WEAK void
-SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
-  SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
-}
-
-} // extern "C"
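
The comment block inside `realloc` above describes a hook-ordering contract that is easiest to see in a standalone sketch: report the old pointer as freed before reallocating, and on failure re-report the old pointer as live. `on_alloc`/`on_free` below are illustrative stand-ins, not the real `__scudo_*` hooks:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static void on_free(void *Ptr) { std::printf("free  %p\n", Ptr); }
static void on_alloc(void *Ptr, std::size_t Size) {
  std::printf("alloc %p (%zu bytes)\n", Ptr, Size);
}

static void *tracedRealloc(void *Ptr, std::size_t Size, std::size_t OldSize) {
  on_free(Ptr); // pretend the old pointer is gone before reallocating, so a
                // concurrent malloc reusing its address is never a double use
  void *NewPtr = std::realloc(Ptr, Size);
  if (NewPtr)
    on_alloc(NewPtr, Size); // success: report the (possibly identical) block
  else
    on_alloc(Ptr, OldSize); // failure: the old block is still live
  return NewPtr;
}

int main() {
  void *P = std::malloc(16);
  on_alloc(P, 16);
  P = tracedRealloc(P, 64, 16);
  on_free(P);
  std::free(P);
}
```
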
diff --git a/Telegram/ThirdParty/scudo/wrappers_c_bionic.cpp b/Telegram/ThirdParty/scudo/wrappers_c_bionic.cpp
deleted file mode 100644
index 21694c3f1..000000000
--- a/Telegram/ThirdParty/scudo/wrappers_c_bionic.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-//===-- wrappers_c_bionic.cpp -----------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-// This is only used when compiled as part of Bionic.
-#if SCUDO_ANDROID && _BIONIC
-
-#include "allocator_config.h"
-#include "internal_defs.h"
-#include "platform.h"
-#include "scudo/interface.h"
-#include "wrappers_c.h"
-#include "wrappers_c_checks.h"
-
-#include <stdint.h>
-#include <stdio.h>
-
-// Regular MallocDispatch definitions.
-#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
-#define SCUDO_ALLOCATOR Allocator
-
-extern "C" void SCUDO_PREFIX(malloc_postinit)();
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
-    SCUDO_ALLOCATOR;
-
-#include "wrappers_c.inc"
-
-#undef SCUDO_ALLOCATOR
-#undef SCUDO_PREFIX
-
-// TODO(kostyak): support both allocators.
-INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
-
-INTERFACE void __scudo_get_error_info(
-    struct scudo_error_info *error_info, uintptr_t fault_addr,
-    const char *stack_depot, size_t stack_depot_size, const char *region_info,
-    const char *ring_buffer, size_t ring_buffer_size, const char *memory,
-    const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
-  (void)(stack_depot_size);
-  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
-                         ring_buffer, ring_buffer_size, memory, memory_tags,
-                         memory_addr, memory_size);
-}
-
-INTERFACE const char *__scudo_get_stack_depot_addr() {
-  return Allocator.getStackDepotAddress();
-}
-
-INTERFACE size_t __scudo_get_stack_depot_size() {
-  return sizeof(scudo::StackDepot);
-}
-
-INTERFACE const char *__scudo_get_region_info_addr() {
-  return Allocator.getRegionInfoArrayAddress();
-}
-
-INTERFACE size_t __scudo_get_region_info_size() {
-  return Allocator.getRegionInfoArraySize();
-}
-
-INTERFACE const char *__scudo_get_ring_buffer_addr() {
-  return Allocator.getRingBufferAddress();
-}
-
-INTERFACE size_t __scudo_get_ring_buffer_size() {
-  return Allocator.getRingBufferSize();
-}
-
-#endif // SCUDO_ANDROID && _BIONIC
diff --git a/Telegram/ThirdParty/scudo/wrappers_c_checks.h b/Telegram/ThirdParty/scudo/wrappers_c_checks.h
deleted file mode 100644
index 9cd48e827..000000000
--- a/Telegram/ThirdParty/scudo/wrappers_c_checks.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CHECKS_H_
-#define SCUDO_CHECKS_H_
-
-#include "common.h"
-
-#include <errno.h>
-
-#ifndef __has_builtin
-#define __has_builtin(X) 0
-#endif
-
-namespace scudo {
-
-// Common errno-setting logic shared by almost all Scudo C wrappers.
-inline void *setErrnoOnNull(void *Ptr) {
-  if (UNLIKELY(!Ptr))
-    errno = ENOMEM;
-  return Ptr;
-}
-
-// Checks return true on failure.
-
-// Checks aligned_alloc() parameters, verifies that the alignment is a power of
-// two and that the size is a multiple of alignment.
-inline bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
-  return Alignment == 0 || !isPowerOfTwo(Alignment) ||
-         !isAligned(Size, Alignment);
-}
-
-// Checks posix_memalign() parameters, verifies that alignment is a power of two
-// and a multiple of sizeof(void *).
-inline bool checkPosixMemalignAlignment(uptr Alignment) {
-  return Alignment == 0 || !isPowerOfTwo(Alignment) ||
-         !isAligned(Alignment, sizeof(void *));
-}
-
-// Returns true if calloc(Size, N) overflows on the Size*N calculation. Use a
-// builtin supported by recent clang & GCC if it exists; otherwise fall back to
-// a costly division.
-inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
-#if __has_builtin(__builtin_umull_overflow) && (SCUDO_WORDSIZE == 64U)
-  return __builtin_umull_overflow(Size, N,
-                                  reinterpret_cast<unsigned long *>(Product));
-#elif __has_builtin(__builtin_umul_overflow) && (SCUDO_WORDSIZE == 32U)
-  // On e.g. armv7, uptr/uintptr_t may be defined as unsigned long
-  return __builtin_umul_overflow(Size, N,
-                                 reinterpret_cast<unsigned int *>(Product));
-#else
-  *Product = Size * N;
-  if (!Size)
-    return false;
-  return (*Product / Size) != N;
-#endif
-}
-
-// Returns true if the size passed to pvalloc overflows when rounded to the next
-// multiple of PageSize.
-inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
-  return roundUp(Size, PageSize) < Size;
-}
-
-} // namespace scudo
-
-#endif // SCUDO_CHECKS_H_
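
The structure of `checkForCallocOverflow` above, a compiler builtin when available with a division-based fallback otherwise, ports directly to standard C++. A sketch using the generic `__builtin_mul_overflow` (recent GCC/Clang) instead of the width-specific variants the header selects by `SCUDO_WORDSIZE`:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

#ifndef __has_builtin
#define __has_builtin(X) 0
#endif

// Returns true if Size * N overflows; *Product receives the (wrapped) result.
static bool mulOverflows(std::size_t Size, std::size_t N,
                         std::size_t *Product) {
#if __has_builtin(__builtin_mul_overflow)
  return __builtin_mul_overflow(Size, N, Product);
#else
  *Product = Size * N; // unsigned wrap is well-defined
  return Size != 0 && *Product / Size != N;
#endif
}

int main() {
  std::size_t Product;
  std::printf("%d\n", mulOverflows(SIZE_MAX / 2, 3, &Product)); // 1: overflows
  std::printf("%d\n", mulOverflows(1024, 1024, &Product));      // 0: fits
}
```
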
diff --git a/Telegram/ThirdParty/scudo/wrappers_cpp.cpp b/Telegram/ThirdParty/scudo/wrappers_cpp.cpp
deleted file mode 100644
index 098d4f71a..000000000
--- a/Telegram/ThirdParty/scudo/wrappers_cpp.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "platform.h"
-
-// Skip this compilation unit if compiled as part of Bionic.
-#if !SCUDO_ANDROID || !_BIONIC
-
-#include "allocator_config.h"
-#include "internal_defs.h"
-#include "platform.h"
-#include "scudo/interface.h"
-#include "wrappers_c.h"
-
-#include <stdint.h>
-
-namespace std {
-struct nothrow_t {};
-enum class align_val_t : size_t {};
-} // namespace std
-
-static void reportAllocation(void *ptr, size_t size) {
-  if (SCUDO_ENABLE_HOOKS)
-    if (__scudo_allocate_hook && ptr)
-      __scudo_allocate_hook(ptr, size);
-}
-static void reportDeallocation(void *ptr) {
-  if (SCUDO_ENABLE_HOOKS)
-    if (__scudo_deallocate_hook)
-      __scudo_deallocate_hook(ptr);
-}
-
-INTERFACE WEAK void *operator new(size_t size) {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new[](size_t size) {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new(size_t size,
-                                  std::nothrow_t const &) NOEXCEPT {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new[](size_t size,
-                                    std::nothrow_t const &) NOEXCEPT {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
-                                 static_cast<scudo::uptr>(align));
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
-                                 static_cast<scudo::uptr>(align));
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
-                                  std::nothrow_t const &) NOEXCEPT {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
-                                 static_cast<scudo::uptr>(align));
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
-                                    std::nothrow_t const &) NOEXCEPT {
-  void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
-                                 static_cast<scudo::uptr>(align));
-  reportAllocation(Ptr, size);
-  return Ptr;
-}
-
-INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
-}
-INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
-}
-INTERFACE WEAK void operator delete(void *ptr,
-                                    std::nothrow_t const &) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
-}
-INTERFACE WEAK void operator delete[](void *ptr,
-                                      std::nothrow_t const &) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
-}
-INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
-}
-INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
-}
-INTERFACE WEAK void operator delete(void *ptr,
-                                    std::align_val_t align) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
-                       static_cast<scudo::uptr>(align));
-}
-INTERFACE WEAK void operator delete[](void *ptr,
-                                      std::align_val_t align) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
-                       static_cast<scudo::uptr>(align));
-}
-INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
-                                    std::nothrow_t const &) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
-                       static_cast<scudo::uptr>(align));
-}
-INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
-                                      std::nothrow_t const &) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
-                       static_cast<scudo::uptr>(align));
-}
-INTERFACE WEAK void operator delete(void *ptr, size_t size,
-                                    std::align_val_t align) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
-                       static_cast<scudo::uptr>(align));
-}
-INTERFACE WEAK void operator delete[](void *ptr, size_t size,
-                                      std::align_val_t align) NOEXCEPT {
-  reportDeallocation(ptr);
-  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
-                       static_cast<scudo::uptr>(align));
-}
-
-#endif // !SCUDO_ANDROID || !_BIONIC
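
The deleted wrappers_cpp.cpp is fourteen overloads funnelling into one `allocate`/`deallocate` pair. A minimal sketch of the same pattern with just the scalar forms, backed by malloc/free for portability:

```cpp
#include <cstdio>
#include <cstdlib>
#include <new>

// Global scalar operator new: every allocation goes through one backend,
// as the scudo wrappers route everything through Allocator.allocate.
void *operator new(std::size_t Size) {
  void *Ptr = std::malloc(Size);
  if (!Ptr)
    throw std::bad_alloc(); // plain new must not return null
  std::printf("new %zu -> %p\n", Size, Ptr);
  return Ptr;
}

void operator delete(void *Ptr) noexcept {
  std::printf("delete %p\n", Ptr);
  std::free(Ptr);
}

// Sized delete forwards to the unsized form, much as the deleted wrappers
// pass the size down to Allocator.deallocate as a hint.
void operator delete(void *Ptr, std::size_t) noexcept { operator delete(Ptr); }

int main() {
  int *X = new int(7);
  delete X;
}
```

The real file also covers the array, nothrow, and aligned variants; each differs only in the origin tag and alignment it forwards.
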
diff --git a/cmake b/cmake
index 022c15d43..f921cb6ab 160000
--- a/cmake
+++ b/cmake
@@ -1 +1 @@
-Subproject commit 022c15d437aba149b1495532b1560de2a71b13df
+Subproject commit f921cb6aba9ada6099b3f9c8c237986ecda238f5