Mirror of https://github.com/AyuGram/AyuGramDesktop.git, synced 2025-06-05 06:33:57 +02:00
parent 7c44cda76e
commit 10272ee0cf
106 changed files with 5 additions and 19901 deletions
3  .gitmodules  (vendored)
@@ -76,6 +76,9 @@
 [submodule "Telegram/lib_webview"]
 	path = Telegram/lib_webview
 	url = https://github.com/desktop-app/lib_webview.git
+[submodule "Telegram/ThirdParty/jemalloc"]
+	path = Telegram/ThirdParty/jemalloc
+	url = https://github.com/jemalloc/jemalloc
 [submodule "Telegram/ThirdParty/dispatch"]
 	path = Telegram/ThirdParty/dispatch
 	url = https://github.com/apple/swift-corelibs-libdispatch
1  Telegram/ThirdParty/jemalloc  (vendored submodule)
@@ -0,0 +1 @@
+Subproject commit 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
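The new submodule pins jemalloc at the subproject commit above; it replaces the vendored scudo sources deleted below. As a hedged illustration only (not part of this commit), a minimal C++ sketch of querying jemalloc at runtime through its mallctl interface, assuming a build that exposes the unprefixed symbol names:

#include <cstdint>
#include <cstdio>
#include <jemalloc/jemalloc.h>

int main() {
  // Advance the epoch so jemalloc refreshes its cached statistics.
  uint64_t Epoch = 1;
  size_t Sz = sizeof(Epoch);
  mallctl("epoch", &Epoch, &Sz, &Epoch, Sz);

  // Read the total bytes currently allocated by the application.
  size_t Allocated = 0;
  Sz = sizeof(Allocated);
  if (mallctl("stats.allocated", &Allocated, &Sz, nullptr, 0) == 0)
    std::printf("stats.allocated = %zu bytes\n", Allocated);
  return 0;
}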
253  Telegram/ThirdParty/scudo/CMakeLists.txt  (vendored)
@@ -1,253 +0,0 @@
add_compiler_rt_component(scudo_standalone)

include_directories(../.. include)

set(SCUDO_CFLAGS)

list(APPEND SCUDO_CFLAGS
  -Werror=conversion
  -Wall
  -Wextra
  -pedantic
  -g
  -nostdinc++)

# Remove -stdlib= which is unused when passing -nostdinc++.
string(REGEX REPLACE "-stdlib=[a-zA-Z+]*" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")

append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SCUDO_CFLAGS)

append_list_if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG -fno-exceptions SCUDO_CFLAGS)

append_list_if(COMPILER_RT_HAS_WNO_PEDANTIC -Wno-pedantic SCUDO_CFLAGS)

# FIXME: find cleaner way to agree with GWPAsan flags
append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SCUDO_CFLAGS)

if(COMPILER_RT_DEBUG)
  list(APPEND SCUDO_CFLAGS -O0 -DSCUDO_DEBUG=1 -DSCUDO_ENABLE_HOOKS=1)
else()
  list(APPEND SCUDO_CFLAGS -O3)
endif()

append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
               SCUDO_CFLAGS)

set(SCUDO_LINK_FLAGS)

list(APPEND SCUDO_LINK_FLAGS -Wl,-z,defs,-z,now,-z,relro)

list(APPEND SCUDO_LINK_FLAGS -ffunction-sections -fdata-sections -Wl,--gc-sections)

# We don't use the C++ standard library, so avoid including it by mistake.
append_list_if(COMPILER_RT_HAS_NOSTDLIBXX_FLAG -nostdlib++ SCUDO_LINK_FLAGS)
append_list_if(CXX_SUPPORTS_UNWINDLIB_NONE_FLAG --unwindlib=none SCUDO_LINK_FLAGS)

if(COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH)
  list(APPEND SCUDO_CFLAGS "--sysroot=${COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH}")
endif()

if(ANDROID)
  list(APPEND SCUDO_CFLAGS -fno-emulated-tls)

  # Put the shared library in the global group. For more details, see
  # android-changes-for-ndk-developers.md#changes-to-library-search-order
  append_list_if(COMPILER_RT_HAS_Z_GLOBAL -Wl,-z,global SCUDO_LINK_FLAGS)
endif()

set(SCUDO_HEADERS
  allocator_common.h
  allocator_config.h
  atomic_helpers.h
  bytemap.h
  checksum.h
  chunk.h
  condition_variable.h
  condition_variable_base.h
  condition_variable_linux.h
  combined.h
  common.h
  flags_parser.h
  flags.h
  fuchsia.h
  internal_defs.h
  linux.h
  list.h
  local_cache.h
  memtag.h
  mem_map.h
  mem_map_base.h
  mem_map_fuchsia.h
  mem_map_linux.h
  mutex.h
  options.h
  platform.h
  primary32.h
  primary64.h
  quarantine.h
  release.h
  report.h
  report_linux.h
  secondary.h
  size_class_map.h
  stack_depot.h
  stats.h
  string_utils.h
  timing.h
  tsd_exclusive.h
  tsd_shared.h
  tsd.h
  vector.h
  wrappers_c_checks.h
  wrappers_c.h

  include/scudo/interface.h
  )

set(SCUDO_SOURCES
  checksum.cpp
  common.cpp
  condition_variable_linux.cpp
  crc32_hw.cpp
  flags_parser.cpp
  flags.cpp
  fuchsia.cpp
  linux.cpp
  mem_map.cpp
  mem_map_fuchsia.cpp
  mem_map_linux.cpp
  release.cpp
  report.cpp
  report_linux.cpp
  string_utils.cpp
  timing.cpp
  )

# Temporary hack until LLVM libc supports inttypes.h print format macros
# See: https://github.com/llvm/llvm-project/issues/63317#issuecomment-1591906241
if(LLVM_LIBC_INCLUDE_SCUDO)
  list(REMOVE_ITEM SCUDO_HEADERS timing.h)
  list(REMOVE_ITEM SCUDO_SOURCES timing.cpp)
endif()

# Enable the necessary instruction set for scudo_crc32.cpp, if available.
# Newer compiler versions use -mcrc32 rather than -msse4.2.
if (COMPILER_RT_HAS_MCRC32_FLAG)
  set_source_files_properties(crc32_hw.cpp PROPERTIES COMPILE_FLAGS -mcrc32)
elseif (COMPILER_RT_HAS_MSSE4_2_FLAG)
  set_source_files_properties(crc32_hw.cpp PROPERTIES COMPILE_FLAGS -msse4.2)
endif()

# Enable the AArch64 CRC32 feature for crc32_hw.cpp, if available.
# Note that it is enabled by default starting with armv8.1-a.
if (COMPILER_RT_HAS_MCRC_FLAG)
  set_source_files_properties(crc32_hw.cpp PROPERTIES COMPILE_FLAGS -mcrc)
endif()

set(SCUDO_SOURCES_C_WRAPPERS
  wrappers_c.cpp
  )

set(SCUDO_SOURCES_CXX_WRAPPERS
  wrappers_cpp.cpp
  )

set(SCUDO_OBJECT_LIBS)
set(SCUDO_LINK_LIBS)

if (COMPILER_RT_HAS_GWP_ASAN)
  if(COMPILER_RT_USE_LLVM_UNWINDER)
    list(APPEND SCUDO_LINK_LIBS ${COMPILER_RT_UNWINDER_LINK_LIBS} dl)
  elseif (COMPILER_RT_HAS_GCC_S_LIB)
    list(APPEND SCUDO_LINK_LIBS gcc_s)
  elseif (COMPILER_RT_HAS_GCC_LIB)
    list(APPEND SCUDO_LINK_LIBS gcc)
  elseif (NOT COMPILER_RT_USE_BUILTINS_LIBRARY)
    message(FATAL_ERROR "No suitable unwinder library")
  endif()

  add_dependencies(scudo_standalone gwp_asan)
  list(APPEND SCUDO_OBJECT_LIBS
       RTGwpAsan RTGwpAsanBacktraceLibc RTGwpAsanSegvHandler
       RTGwpAsanOptionsParser)

  append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fno-omit-frame-pointer
                 -mno-omit-leaf-frame-pointer
                 SCUDO_CFLAGS)
  list(APPEND SCUDO_CFLAGS -DGWP_ASAN_HOOKS)

endif()

if(COMPILER_RT_BUILD_SCUDO_STANDALONE_WITH_LLVM_LIBC)
  include_directories(${COMPILER_RT_BINARY_DIR}/../libc/include/)

  set(SCUDO_DEPS libc-headers)

  list(APPEND SCUDO_CFLAGS "-ffreestanding")
endif()

append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread SCUDO_LINK_FLAGS)

append_list_if(FUCHSIA zircon SCUDO_LINK_LIBS)

if(COMPILER_RT_DEFAULT_TARGET_ARCH MATCHES "mips|mips64|mipsel|mips64el")
  list(APPEND SCUDO_LINK_LIBS atomic)
endif()

if(COMPILER_RT_HAS_SCUDO_STANDALONE)
  add_compiler_rt_object_libraries(RTScudoStandalone
    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES}
    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
    CFLAGS ${SCUDO_CFLAGS}
    DEPS ${SCUDO_DEPS})
  add_compiler_rt_object_libraries(RTScudoStandaloneCWrappers
    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES_C_WRAPPERS}
    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
    CFLAGS ${SCUDO_CFLAGS}
    DEPS ${SCUDO_DEPS})
  add_compiler_rt_object_libraries(RTScudoStandaloneCxxWrappers
    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
    CFLAGS ${SCUDO_CFLAGS}
    DEPS ${SCUDO_DEPS})

  add_compiler_rt_runtime(clang_rt.scudo_standalone
    STATIC
    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES} ${SCUDO_SOURCES_C_WRAPPERS}
    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
    CFLAGS ${SCUDO_CFLAGS}
    DEPS ${SCUDO_DEPS}
    OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
    PARENT_TARGET scudo_standalone)
  add_compiler_rt_runtime(clang_rt.scudo_standalone_cxx
    STATIC
    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
    CFLAGS ${SCUDO_CFLAGS}
    DEPS ${SCUDO_DEPS}
    PARENT_TARGET scudo_standalone)

  if(COMPILER_RT_SCUDO_STANDALONE_BUILD_SHARED)
    add_compiler_rt_runtime(clang_rt.scudo_standalone
      SHARED
      ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
      SOURCES ${SCUDO_SOURCES} ${SCUDO_SOURCES_C_WRAPPERS} ${SCUDO_SOURCES_CXX_WRAPPERS}
      ADDITIONAL_HEADERS ${SCUDO_HEADERS}
      CFLAGS ${SCUDO_CFLAGS}
      DEPS ${SCUDO_DEPS}
      OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
      LINK_FLAGS ${SCUDO_LINK_FLAGS}
      LINK_LIBS ${SCUDO_LINK_LIBS}
      PARENT_TARGET scudo_standalone)
  endif()

  add_subdirectory(benchmarks)
  if(COMPILER_RT_INCLUDE_TESTS)
    add_subdirectory(tests)
  endif()
endif()
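Note the -Werror=conversion in the flag list above: it turns implicit narrowing conversions into hard errors, which is why the scudo sources below cast u16 arithmetic explicitly. A minimal sketch of the pattern (illustrative only):

#include <cstdint>

int main() {
  uint16_t A = 1, B = 2;
  // A + B is promoted to int; assigning it back to uint16_t narrows, which
  // is an error under -Werror=conversion:
  //   uint16_t Bad = A + B;
  uint16_t Good = static_cast<uint16_t>(A + B); // explicit cast compiles cleanly
  return Good;
}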
85  Telegram/ThirdParty/scudo/allocator_common.h  (vendored)
@@ -1,85 +0,0 @@
//===-- allocator_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_COMMON_H_
#define SCUDO_ALLOCATOR_COMMON_H_

#include "common.h"
#include "list.h"

namespace scudo {

template <class SizeClassAllocator> struct TransferBatch {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

  static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
  void setFromArray(CompactPtrT *Array, u16 N) {
    DCHECK_LE(N, MaxNumCached);
    Count = N;
    memcpy(Batch, Array, sizeof(Batch[0]) * Count);
  }
  void appendFromArray(CompactPtrT *Array, u16 N) {
    DCHECK_LE(N, MaxNumCached - Count);
    memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
    // u16 will be promoted to int by arithmetic type conversion.
    Count = static_cast<u16>(Count + N);
  }
  void appendFromTransferBatch(TransferBatch *B, u16 N) {
    DCHECK_LE(N, MaxNumCached - Count);
    DCHECK_GE(B->Count, N);
    // Append from the back of `B`.
    memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
    // u16 will be promoted to int by arithmetic type conversion.
    Count = static_cast<u16>(Count + N);
    B->Count = static_cast<u16>(B->Count - N);
  }
  void clear() { Count = 0; }
  void add(CompactPtrT P) {
    DCHECK_LT(Count, MaxNumCached);
    Batch[Count++] = P;
  }
  void moveToArray(CompactPtrT *Array) {
    memcpy(Array, Batch, sizeof(Batch[0]) * Count);
    clear();
  }
  u16 getCount() const { return Count; }
  bool isEmpty() const { return Count == 0U; }
  CompactPtrT get(u16 I) const {
    DCHECK_LE(I, Count);
    return Batch[I];
  }
  TransferBatch *Next;

private:
  CompactPtrT Batch[MaxNumCached];
  u16 Count;
};

// A BatchGroup is used to collect blocks. Each group has a group id to
// identify the group kind of contained blocks.
template <class SizeClassAllocator> struct BatchGroup {
  // `Next` is used by IntrusiveList.
  BatchGroup *Next;
  // The compact base address of each group
  uptr CompactPtrGroupBase;
  // Cache value of SizeClassAllocatorLocalCache::getMaxCached()
  u16 MaxCachedPerBatch;
  // Number of blocks pushed into this group. This is an increment-only
  // counter.
  uptr PushedBlocks;
  // This is used to track how many bytes are not in-use since last time we
  // tried to release pages.
  uptr BytesInBGAtLastCheckpoint;
  // Blocks are managed by TransferBatch in a list.
  SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
};

} // namespace scudo

#endif // SCUDO_ALLOCATOR_COMMON_H_
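TransferBatch above is a fixed-capacity staging array of compact pointers moved in and out with memcpy. A self-contained sketch of the same idea, with scudo's DCHECKs replaced by asserts and illustrative names (not scudo's API):

#include <cassert>
#include <cstdint>
#include <cstring>

struct MiniBatch {
  static const uint16_t MaxNumCached = 8;
  uintptr_t Batch[MaxNumCached];
  uint16_t Count = 0;

  void setFromArray(const uintptr_t *Array, uint16_t N) {
    assert(N <= MaxNumCached);
    Count = N;
    std::memcpy(Batch, Array, sizeof(Batch[0]) * Count);
  }
  void moveToArray(uintptr_t *Array) {
    std::memcpy(Array, Batch, sizeof(Batch[0]) * Count);
    Count = 0; // clear(), as in the original
  }
};

int main() {
  uintptr_t In[3] = {0x10, 0x20, 0x30}, Out[3] = {};
  MiniBatch B;
  B.setFromArray(In, 3);
  B.moveToArray(Out);
  assert(Out[2] == 0x30 && B.Count == 0);
  return 0;
}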
280  Telegram/ThirdParty/scudo/allocator_config.h  (vendored)
@@ -1,280 +0,0 @@
//===-- allocator_config.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_CONFIG_H_
#define SCUDO_ALLOCATOR_CONFIG_H_

#include "combined.h"
#include "common.h"
#include "condition_variable.h"
#include "flags.h"
#include "primary32.h"
#include "primary64.h"
#include "secondary.h"
#include "size_class_map.h"
#include "tsd_exclusive.h"
#include "tsd_shared.h"

// To import a custom configuration, define `SCUDO_USE_CUSTOM_CONFIG` and
// aliasing the `Config` like:
//
// namespace scudo {
//   // The instance of Scudo will be initiated with `Config`.
//   typedef CustomConfig Config;
//   // Aliasing as default configuration to run the tests with this config.
//   typedef CustomConfig DefaultConfig;
// } // namespace scudo
//
// Put them in the header `custom_scudo_config.h` then you will be using the
// custom configuration and able to run all the tests as well.
#ifdef SCUDO_USE_CUSTOM_CONFIG
#include "custom_scudo_config.h"
#endif

namespace scudo {

// The combined allocator uses a structure as a template argument that
// specifies the configuration options for the various subcomponents of the
// allocator.
//
// struct ExampleConfig {
//   // Indicates possible support for Memory Tagging.
//   static const bool MaySupportMemoryTagging = false;
//
//   // Thread-Specific Data Registry used, shared or exclusive.
//   template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
//
//   struct Primary {
//     // SizeClassMap to use with the Primary.
//     using SizeClassMap = DefaultSizeClassMap;
//
//     // Log2 of the size of a size class region, as used by the Primary.
//     static const uptr RegionSizeLog = 30U;
//
//     // Log2 of the size of block group, as used by the Primary. Each group
//     // contains a range of memory addresses, blocks in the range will belong
//     // to the same group. In general, single region may have 1 or 2MB group
//     // size. Multiple regions will have the group size equal to the region
//     // size because the region size is usually smaller than 1 MB.
//     // Smaller value gives fine-grained control of memory usage but the
//     // trade-off is that it may take longer time of deallocation.
//     static const uptr GroupSizeLog = 20U;
//
//     // Defines the type and scale of a compact pointer. A compact pointer can
//     // be understood as the offset of a pointer within the region it belongs
//     // to, in increments of a power-of-2 scale.
//     // eg: Ptr = Base + (CompactPtr << Scale).
//     typedef u32 CompactPtrT;
//     static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
//
//     // Indicates support for offsetting the start of a region by
//     // a random number of pages. Only used with primary64.
//     static const bool EnableRandomOffset = true;
//
//     // Call map for user memory with at least this size. Only used with
//     // primary64.
//     static const uptr MapSizeIncrement = 1UL << 18;
//
//     // Defines the minimal & maximal release interval that can be set.
//     static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
//     static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
//
//     // Use condition variable to shorten the waiting time of refillment of
//     // freelist. Note that this depends on the implementation of condition
//     // variable on each platform and the performance may vary so that it
//     // doesn't guarantee a performance benefit.
//     // Note that both variables have to be defined to enable it.
//     static const bool UseConditionVariable = true;
//     using ConditionVariableT = ConditionVariableLinux;
//   };
//   // Defines the type of Primary allocator to use.
//   template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
//
//   // Defines the type of cache used by the Secondary. Some additional
//   // configuration entries can be necessary depending on the Cache.
//   struct Secondary {
//     struct Cache {
//       static const u32 EntriesArraySize = 32U;
//       static const u32 QuarantineSize = 0U;
//       static const u32 DefaultMaxEntriesCount = 32U;
//       static const uptr DefaultMaxEntrySize = 1UL << 19;
//       static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
//       static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
//     };
//     // Defines the type of Secondary Cache to use.
//     template <typename Config> using CacheT = MapAllocatorCache<Config>;
//   };
//   // Defines the type of Secondary allocator to use.
//   template <typename Config> using SecondaryT = MapAllocator<Config>;
// };

#ifndef SCUDO_USE_CUSTOM_CONFIG

// Default configurations for various platforms. Note this is only enabled when
// there's no custom configuration in the build system.
struct DefaultConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive

  struct Primary {
    using SizeClassMap = DefaultSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const uptr RegionSizeLog = 32U;
    static const uptr GroupSizeLog = 21U;
    typedef uptr CompactPtrT;
    static const uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const uptr MapSizeIncrement = 1UL << 18;
#else
    static const uptr RegionSizeLog = 19U;
    static const uptr GroupSizeLog = 19U;
    typedef uptr CompactPtrT;
#endif
    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
#else
  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    struct Cache {
      static const u32 EntriesArraySize = 32U;
      static const u32 QuarantineSize = 0U;
      static const u32 DefaultMaxEntriesCount = 32U;
      static const uptr DefaultMaxEntrySize = 1UL << 19;
      static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
      static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    };
    template <typename Config> using CacheT = MapAllocatorCache<Config>;
  };

  template <typename Config> using SecondaryT = MapAllocator<Config>;
};

#endif // SCUDO_USE_CUSTOM_CONFIG

struct AndroidConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const uptr RegionSizeLog = 28U;
    typedef u32 CompactPtrT;
    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const uptr MapSizeIncrement = 1UL << 18;
#else
    static const uptr RegionSizeLog = 18U;
    static const uptr GroupSizeLog = 18U;
    typedef uptr CompactPtrT;
#endif
    static const s32 MinReleaseToOsIntervalMs = 1000;
    static const s32 MaxReleaseToOsIntervalMs = 1000;
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
#else
  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    struct Cache {
      static const u32 EntriesArraySize = 256U;
      static const u32 QuarantineSize = 32U;
      static const u32 DefaultMaxEntriesCount = 32U;
      static const uptr DefaultMaxEntrySize = 2UL << 20;
      static const s32 MinReleaseToOsIntervalMs = 0;
      static const s32 MaxReleaseToOsIntervalMs = 1000;
    };
    template <typename Config> using CacheT = MapAllocatorCache<Config>;
  };

  template <typename Config> using SecondaryT = MapAllocator<Config>;
};

#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A>
  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = FuchsiaSizeClassMap;
#if SCUDO_RISCV64
    // Support 39-bit VMA for riscv-64
    static const uptr RegionSizeLog = 28U;
    static const uptr GroupSizeLog = 19U;
#else
    static const uptr RegionSizeLog = 30U;
    static const uptr GroupSizeLog = 21U;
#endif
    typedef u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const uptr MapSizeIncrement = 1UL << 18;
    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
  };
  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = MapAllocator<Config>;
};

struct TrustyConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.

  struct Primary {
    using SizeClassMap = TrustySizeClassMap;
    static const uptr RegionSizeLog = 28U;
    static const uptr GroupSizeLog = 20U;
    typedef u32 CompactPtrT;
    static const bool EnableRandomOffset = false;
    static const uptr MapSizeIncrement = 1UL << 12;
    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
  };
  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
  };

  template <typename Config> using SecondaryT = MapAllocator<Config>;
};
#endif

#ifndef SCUDO_USE_CUSTOM_CONFIG

#if SCUDO_ANDROID
typedef AndroidConfig Config;
#elif SCUDO_FUCHSIA
typedef FuchsiaConfig Config;
#elif SCUDO_TRUSTY
typedef TrustyConfig Config;
#else
typedef DefaultConfig Config;
#endif

#endif // SCUDO_USE_CUSTOM_CONFIG

} // namespace scudo

#endif // SCUDO_ALLOCATOR_CONFIG_H_
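The platform configs above all follow the pattern the ExampleConfig comment describes: the combined allocator takes a structure as a template argument and reads its tunables from nested types. Reduced to a runnable toy (ToyConfig is a hypothetical stand-in, not a scudo type):

#include <cstdio>

template <typename Config> struct Allocator {
  // The allocator pulls compile-time tunables out of the Config policy type.
  static const unsigned RegionSizeLog = Config::Primary::RegionSizeLog;
};

struct ToyConfig {
  struct Primary {
    static const unsigned RegionSizeLog = 30U;
  };
};

int main() {
  std::printf("region size = %u bytes\n",
              1U << Allocator<ToyConfig>::RegionSizeLog);
  return 0;
}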
145  Telegram/ThirdParty/scudo/atomic_helpers.h  (vendored)
@@ -1,145 +0,0 @@
//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ATOMIC_H_
#define SCUDO_ATOMIC_H_

#include "internal_defs.h"

namespace scudo {

enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
static_assert(memory_order_release == __ATOMIC_RELEASE, "");
static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");

struct atomic_u8 {
  typedef u8 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u16 {
  typedef u16 Type;
  volatile Type ValDoNotUse;
};

struct atomic_s32 {
  typedef s32 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u64 {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
  alignas(8) volatile Type ValDoNotUse;
};

struct atomic_uptr {
  typedef uptr Type;
  volatile Type ValDoNotUse;
};

template <typename T>
inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type V;
  __atomic_load(&A->ValDoNotUse, &V, MO);
  return V;
}

template <typename T>
inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  __atomic_store(&A->ValDoNotUse, &V, MO);
}

inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }

template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type R;
  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
  return R;
}

template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
                                           typename T::Type Xchg,
                                           memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
                                   __ATOMIC_RELAXED);
}

// Clutter-reducing helpers.

template <typename T>
inline typename T::Type atomic_load_relaxed(const volatile T *A) {
  return atomic_load(A, memory_order_relaxed);
}

template <typename T>
inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
  atomic_store(A, V, memory_order_relaxed);
}

template <typename T>
inline typename T::Type
atomic_compare_exchange_strong(volatile T *A, typename T::Type Cmp,
                               typename T::Type Xchg, memory_order MO) {
  atomic_compare_exchange_strong(A, &Cmp, Xchg, MO);
  return Cmp;
}

} // namespace scudo

#endif // SCUDO_ATOMIC_H_
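The wrappers above map a tiny memory_order enum straight onto the GCC/Clang __atomic builtins, operating on a plain volatile field. A hedged stand-alone sketch of the same pattern outside scudo's namespace:

#include <cassert>
#include <cstdint>

struct atomic_u32 {
  typedef uint32_t Type;
  volatile Type ValDoNotUse;
};

static uint32_t load_relaxed(const volatile atomic_u32 *A) {
  uint32_t V;
  __atomic_load(&A->ValDoNotUse, &V, __ATOMIC_RELAXED); // generic builtin form
  return V;
}

static void store_relaxed(volatile atomic_u32 *A, uint32_t V) {
  __atomic_store(&A->ValDoNotUse, &V, __ATOMIC_RELAXED);
}

int main() {
  atomic_u32 A{0};
  store_relaxed(&A, 42);
  assert(load_relaxed(&A) == 42);
  return 0;
}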
33  Telegram/ThirdParty/scudo/benchmarks/CMakeLists.txt  (vendored)
@@ -1,33 +0,0 @@
# To build these benchmarks, build the target "ScudoBenchmarks.$ARCH", where
# $ARCH is the name of the target architecture. For example,
# ScudoBenchmarks.x86_64 for 64-bit x86. The benchmark executable is then
# available under projects/compiler-rt/lib/scudo/standalone/benchmarks/ in the
# build directory.

include(AddLLVM)

set(SCUDO_BENCHMARK_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone)
if(ANDROID)
  list(APPEND SCUDO_BENCHMARK_CFLAGS -fno-emulated-tls)
endif()
string(REPLACE ";" " " SCUDO_BENCHMARK_CFLAGS " ${SCUDO_BENCHMARK_CFLAGS}")

foreach(arch ${SCUDO_STANDALONE_SUPPORTED_ARCH})
  add_benchmark(ScudoBenchmarks.${arch}
                malloc_benchmark.cpp
                $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
  set_property(TARGET ScudoBenchmarks.${arch} APPEND_STRING PROPERTY
               COMPILE_FLAGS "${SCUDO_BENCHMARK_CFLAGS}")

  if (COMPILER_RT_HAS_GWP_ASAN)
    add_benchmark(
      ScudoBenchmarksWithGwpAsan.${arch} malloc_benchmark.cpp
      $<TARGET_OBJECTS:RTScudoStandalone.${arch}>
      $<TARGET_OBJECTS:RTGwpAsan.${arch}>
      $<TARGET_OBJECTS:RTGwpAsanBacktraceLibc.${arch}>
      $<TARGET_OBJECTS:RTGwpAsanSegvHandler.${arch}>)
    set_property(
      TARGET ScudoBenchmarksWithGwpAsan.${arch} APPEND_STRING PROPERTY
      COMPILE_FLAGS "${SCUDO_BENCHMARK_CFLAGS} -DGWP_ASAN_HOOKS")
  endif()
endforeach()
105  Telegram/ThirdParty/scudo/benchmarks/malloc_benchmark.cpp  (vendored)
@@ -1,105 +0,0 @@
//===-- malloc_benchmark.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "allocator_config.h"
#include "combined.h"
#include "common.h"

#include "benchmark/benchmark.h"

#include <memory>
#include <vector>

void *CurrentAllocator;
template <typename Config> void PostInitCallback() {
  reinterpret_cast<scudo::Allocator<Config> *>(CurrentAllocator)->initGwpAsan();
}

template <typename Config> static void BM_malloc_free(benchmark::State &State) {
  using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  CurrentAllocator = Allocator.get();

  const size_t NBytes = State.range(0);
  size_t PageSize = scudo::getPageSizeCached();

  for (auto _ : State) {
    void *Ptr = Allocator->allocate(NBytes, scudo::Chunk::Origin::Malloc);
    auto *Data = reinterpret_cast<uint8_t *>(Ptr);
    for (size_t I = 0; I < NBytes; I += PageSize)
      Data[I] = 1;
    benchmark::DoNotOptimize(Ptr);
    Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
  }

  State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NBytes));
}

static const size_t MinSize = 8;
static const size_t MaxSize = 128 * 1024;

// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
// cleanly.
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::AndroidConfig)
    ->Range(MinSize, MaxSize);
#if SCUDO_CAN_USE_PRIMARY64
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::FuchsiaConfig)
    ->Range(MinSize, MaxSize);
#endif

template <typename Config>
static void BM_malloc_free_loop(benchmark::State &State) {
  using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  CurrentAllocator = Allocator.get();

  const size_t NumIters = State.range(0);
  size_t PageSize = scudo::getPageSizeCached();
  std::vector<void *> Ptrs(NumIters);

  for (auto _ : State) {
    size_t SizeLog2 = 0;
    for (void *&Ptr : Ptrs) {
      Ptr = Allocator->allocate(1 << SizeLog2, scudo::Chunk::Origin::Malloc);
      auto *Data = reinterpret_cast<uint8_t *>(Ptr);
      for (size_t I = 0; I < 1 << SizeLog2; I += PageSize)
        Data[I] = 1;
      benchmark::DoNotOptimize(Ptr);
      SizeLog2 = (SizeLog2 + 1) % 16;
    }
    for (void *&Ptr : Ptrs)
      Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
  }

  State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NumIters) *
                          8192);
}

static const size_t MinIters = 8;
static const size_t MaxIters = 32 * 1024;

// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
// cleanly.
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::AndroidConfig)
    ->Range(MinIters, MaxIters);
#if SCUDO_CAN_USE_PRIMARY64
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::FuchsiaConfig)
    ->Range(MinIters, MaxIters);
#endif

BENCHMARK_MAIN();
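The harness above drives scudo's Allocator directly; the same google-benchmark scaffolding works against plain malloc, which can be handy as a baseline. A minimal hedged skeleton using only the standard benchmark API:

#include "benchmark/benchmark.h"

#include <cstdlib>

static void BM_malloc_free_baseline(benchmark::State &State) {
  const size_t NBytes = static_cast<size_t>(State.range(0));
  for (auto _ : State) {
    void *Ptr = std::malloc(NBytes);
    benchmark::DoNotOptimize(Ptr); // keep the allocation from being elided
    std::free(Ptr);
  }
  State.SetBytesProcessed(int64_t(State.iterations()) * int64_t(NBytes));
}
BENCHMARK(BM_malloc_free_baseline)->Range(8, 128 * 1024);

BENCHMARK_MAIN();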
43  Telegram/ThirdParty/scudo/bytemap.h  (vendored)
@@ -1,43 +0,0 @@
//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_BYTEMAP_H_
#define SCUDO_BYTEMAP_H_

#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"

namespace scudo {

template <uptr Size> class FlatByteMap {
public:
  void init() { DCHECK(Size == 0 || Map[0] == 0); }

  void unmapTestOnly() { memset(Map, 0, Size); }

  void set(uptr Index, u8 Value) {
    DCHECK_LT(Index, Size);
    DCHECK_EQ(0U, Map[Index]);
    Map[Index] = Value;
  }
  u8 operator[](uptr Index) {
    DCHECK_LT(Index, Size);
    return Map[Index];
  }

  void disable() {}
  void enable() {}

private:
  u8 Map[Size] = {};
};

} // namespace scudo

#endif // SCUDO_BYTEMAP_H_
83  Telegram/ThirdParty/scudo/checksum.cpp  (vendored)
@@ -1,83 +0,0 @@
//===-- checksum.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "checksum.h"
#include "atomic_helpers.h"
#include "chunk.h"

#if defined(__x86_64__) || defined(__i386__)
#include <cpuid.h>
#elif defined(__arm__) || defined(__aarch64__)
#if SCUDO_FUCHSIA
#include <zircon/features.h>
#include <zircon/syscalls.h>
#else
#include <sys/auxv.h>
#endif
#endif

namespace scudo {

Checksum HashAlgorithm = {Checksum::BSD};

#if defined(__x86_64__) || defined(__i386__)
// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
// CRC32 requires the SSE 4.2 instruction set.
#ifndef bit_SSE4_2
#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
#endif

#ifndef signature_HYGON_ebx // They are not defined in gcc.
// HYGON: "HygonGenuine".
#define signature_HYGON_ebx 0x6f677948
#define signature_HYGON_edx 0x6e65476e
#define signature_HYGON_ecx 0x656e6975
#endif

bool hasHardwareCRC32() {
  u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
  __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
  const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
                       (Edx == signature_INTEL_edx) &&
                       (Ecx == signature_INTEL_ecx);
  const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
                     (Ecx == signature_AMD_ecx);
  const bool IsHygon = (Ebx == signature_HYGON_ebx) &&
                       (Edx == signature_HYGON_edx) &&
                       (Ecx == signature_HYGON_ecx);
  if (!IsIntel && !IsAMD && !IsHygon)
    return false;
  __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
  return !!(Ecx & bit_SSE4_2);
}
#elif defined(__arm__) || defined(__aarch64__)
#ifndef AT_HWCAP
#define AT_HWCAP 16
#endif
#ifndef HWCAP_CRC32
#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
#endif

bool hasHardwareCRC32() {
#if SCUDO_FUCHSIA
  u32 HWCap;
  const zx_status_t Status =
      zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
  if (Status != ZX_OK)
    return false;
  return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
#else
  return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
#endif // SCUDO_FUCHSIA
}
#else
// No hardware CRC32 implemented in Scudo for other architectures.
bool hasHardwareCRC32() { return false; }
#endif // defined(__x86_64__) || defined(__i386__)

} // namespace scudo
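hasHardwareCRC32() above probes CPUID vendor signatures and feature bits by hand. For comparison, a hedged sketch using the compiler's built-in probe, which covers only the x86 path (__builtin_cpu_supports is a GCC/Clang extension, not what scudo uses):

#include <cstdio>

int main() {
#if defined(__x86_64__) || defined(__i386__)
  // SSE 4.2 implies the crc32 instruction used by crc32_hw.cpp.
  std::printf("sse4.2: %d\n", __builtin_cpu_supports("sse4.2") != 0);
#else
  std::printf("non-x86 target; see the getauxval(AT_HWCAP) path above\n");
#endif
  return 0;
}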
59  Telegram/ThirdParty/scudo/checksum.h  (vendored)
@@ -1,59 +0,0 @@
//===-- checksum.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_CHECKSUM_H_
#define SCUDO_CHECKSUM_H_

#include "internal_defs.h"

// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -mcrc32 (earlier: -msse4.2)
// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.

#if defined(__CRC32__)
// NB: clang has <crc32intrin.h> but GCC does not
#include <smmintrin.h>
#define CRC32_INTRINSIC \
  FIRST_32_SECOND_64(__builtin_ia32_crc32si, __builtin_ia32_crc32di)
#elif defined(__SSE4_2__)
#include <smmintrin.h>
#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
#endif
#ifdef __ARM_FEATURE_CRC32
#include <arm_acle.h>
#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
#endif

namespace scudo {

enum class Checksum : u8 {
  BSD = 0,
  HardwareCRC32 = 1,
};

// BSD checksum, unlike a software CRC32, doesn't use any array lookup. We save
// significantly on memory accesses, as well as 1K of CRC32 table, on platforms
// that do not support hardware CRC32. The checksum itself is 16-bit, which is at
// odds with CRC32, but enough for our needs.
inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
  for (u8 I = 0; I < sizeof(Data); I++) {
    Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
    Sum = static_cast<u16>(Sum + (Data & 0xff));
    Data >>= 8;
  }
  return Sum;
}

bool hasHardwareCRC32();
WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);

} // namespace scudo

#endif // SCUDO_CHECKSUM_H_
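The BSD checksum above is small enough to exercise directly: rotate the 16-bit sum right by one, then add the next byte of Data, for each byte. A runnable copy with a sample value:

#include <cstdint>
#include <cstdio>

static uint16_t computeBSDChecksum(uint16_t Sum, uintptr_t Data) {
  for (uint8_t I = 0; I < sizeof(Data); I++) {
    Sum = static_cast<uint16_t>((Sum >> 1) | ((Sum & 1) << 15)); // rotate right
    Sum = static_cast<uint16_t>(Sum + (Data & 0xff));            // add low byte
    Data >>= 8;
  }
  return Sum;
}

int main() {
  std::printf("checksum(0xdeadbeef) = 0x%04x\n",
              computeBSDChecksum(0, 0xdeadbeef));
  return 0;
}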
143  Telegram/ThirdParty/scudo/chunk.h  (vendored)
@@ -1,143 +0,0 @@
//===-- chunk.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_CHUNK_H_
#define SCUDO_CHUNK_H_

#include "platform.h"

#include "atomic_helpers.h"
#include "checksum.h"
#include "common.h"
#include "report.h"

namespace scudo {

extern Checksum HashAlgorithm;

inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for crc32_hw.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a result
  // there is no reason to not use it here.
#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
  for (uptr I = 0; I < ArraySize; I++)
    Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
  return static_cast<u16>(Crc ^ (Crc >> 16));
#else
  if (HashAlgorithm == Checksum::HardwareCRC32) {
    u32 Crc = computeHardwareCRC32(Seed, Value);
    for (uptr I = 0; I < ArraySize; I++)
      Crc = computeHardwareCRC32(Crc, Array[I]);
    return static_cast<u16>(Crc ^ (Crc >> 16));
  } else {
    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
    for (uptr I = 0; I < ArraySize; I++)
      Checksum = computeBSDChecksum(Checksum, Array[I]);
    return Checksum;
  }
#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
       // defined(__ARM_FEATURE_CRC32)
}

namespace Chunk {

// Note that in an ideal world, `State` and `Origin` should be `enum class`, and
// the associated `UnpackedHeader` fields of their respective enum class type
// but https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents it from
// happening, as it will error, complaining the number of bits is not enough.
enum Origin : u8 {
  Malloc = 0,
  New = 1,
  NewArray = 2,
  Memalign = 3,
};

enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };

typedef u64 PackedHeader;
// Update the 'Mask' constants to reflect changes in this structure.
struct UnpackedHeader {
  uptr ClassId : 8;
  u8 State : 2;
  // Origin if State == Allocated, or WasZeroed otherwise.
  u8 OriginOrWasZeroed : 2;
  uptr SizeOrUnusedBytes : 20;
  uptr Offset : 16;
  uptr Checksum : 16;
};
typedef atomic_u64 AtomicPackedHeader;
static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");

// Those constants are required to silence some -Werror=conversion errors when
// assigning values to the related bitfield variables.
constexpr uptr ClassIdMask = (1UL << 8) - 1;
constexpr u8 StateMask = (1U << 2) - 1;
constexpr u8 OriginMask = (1U << 2) - 1;
constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
constexpr uptr OffsetMask = (1UL << 16) - 1;
constexpr uptr ChecksumMask = (1UL << 16) - 1;

constexpr uptr getHeaderSize() {
  return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
  return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                getHeaderSize());
}

inline const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
  return reinterpret_cast<const AtomicPackedHeader *>(
      reinterpret_cast<uptr>(Ptr) - getHeaderSize());
}

// We do not need a cryptographically strong hash for the checksum, but a CRC
// type function that can alert us in the event a header is invalid or
// corrupted. Ideally slightly better than a simple xor of all fields.
static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
                                        UnpackedHeader *Header) {
  UnpackedHeader ZeroChecksumHeader = *Header;
  ZeroChecksumHeader.Checksum = 0;
  uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
  memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
  return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
                         ARRAY_SIZE(HeaderHolder));
}

inline void storeHeader(u32 Cookie, void *Ptr,
                        UnpackedHeader *NewUnpackedHeader) {
  NewUnpackedHeader->Checksum =
      computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
  atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
}

inline void loadHeader(u32 Cookie, const void *Ptr,
                       UnpackedHeader *NewUnpackedHeader) {
  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
  if (UNLIKELY(NewUnpackedHeader->Checksum !=
               computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
    reportHeaderCorruption(const_cast<void *>(Ptr));
}

inline bool isValid(u32 Cookie, const void *Ptr,
                    UnpackedHeader *NewUnpackedHeader) {
  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
  return NewUnpackedHeader->Checksum ==
         computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
}

} // namespace Chunk

} // namespace scudo

#endif // SCUDO_CHUNK_H_
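The static_assert above is the load-bearing invariant: the UnpackedHeader bitfields total exactly 64 bits, so the header round-trips through a single u64 via bit_cast and one relaxed atomic store. A simplified sketch (one underlying field type instead of scudo's mix of uptr and u8):

#include <cstdint>
#include <cstring>

struct Header {
  uint64_t ClassId : 8;
  uint64_t State : 2;
  uint64_t OriginOrWasZeroed : 2;
  uint64_t SizeOrUnusedBytes : 20;
  uint64_t Offset : 16;
  uint64_t Checksum : 16;
};
static_assert(sizeof(Header) == sizeof(uint64_t), "must pack into one u64");

int main() {
  Header H{};
  H.ClassId = 3;
  H.SizeOrUnusedBytes = 128;
  uint64_t Packed;
  std::memcpy(&Packed, &H, sizeof(Packed)); // the bit_cast in the header
  Header Back;
  std::memcpy(&Back, &Packed, sizeof(Back));
  return Back.ClassId == 3 ? 0 : 1;
}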
1538  Telegram/ThirdParty/scudo/combined.h  (vendored)
File diff suppressed because it is too large.
24  Telegram/ThirdParty/scudo/common.cpp  (vendored)
@@ -1,24 +0,0 @@
//===-- common.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "common.h"
#include "atomic_helpers.h"
#include "string_utils.h"

namespace scudo {

uptr PageSizeCached;
uptr getPageSize();

uptr getPageSizeSlow() {
  PageSizeCached = getPageSize();
  CHECK_NE(PageSizeCached, 0);
  return PageSizeCached;
}

} // namespace scudo
232
Telegram/ThirdParty/scudo/common.h
vendored
@@ -1,232 +0,0 @@
//===-- common.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMMON_H_
#define SCUDO_COMMON_H_

#include "internal_defs.h"

#include "fuchsia.h"
#include "linux.h"
#include "trusty.h"

#include <stddef.h>
#include <string.h>
#include <unistd.h>

namespace scudo {

template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
  static_assert(sizeof(Dest) == sizeof(Source), "");
  Dest D;
  memcpy(&D, &S, sizeof(D));
  return D;
}

inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }

inline constexpr uptr roundUp(uptr X, uptr Boundary) {
  DCHECK(isPowerOfTwo(Boundary));
  return (X + Boundary - 1) & ~(Boundary - 1);
}
inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
  return ((X + Boundary - 1) / Boundary) * Boundary;
}

inline constexpr uptr roundDown(uptr X, uptr Boundary) {
  DCHECK(isPowerOfTwo(Boundary));
  return X & ~(Boundary - 1);
}
inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
  return (X / Boundary) * Boundary;
}

inline constexpr bool isAligned(uptr X, uptr Alignment) {
  DCHECK(isPowerOfTwo(Alignment));
  return (X & (Alignment - 1)) == 0;
}
inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
  return X % Alignment == 0;
}

template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }

template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }

template <class T> void Swap(T &A, T &B) {
  T Tmp = A;
  A = B;
  B = Tmp;
}

inline uptr getMostSignificantSetBitIndex(uptr X) {
  DCHECK_NE(X, 0U);
  return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}

inline uptr roundUpPowerOfTwo(uptr Size) {
  DCHECK(Size);
  if (isPowerOfTwo(Size))
    return Size;
  const uptr Up = getMostSignificantSetBitIndex(Size);
  DCHECK_LT(Size, (1UL << (Up + 1)));
  DCHECK_GT(Size, (1UL << Up));
  return 1UL << (Up + 1);
}

inline uptr getLeastSignificantSetBitIndex(uptr X) {
  DCHECK_NE(X, 0U);
  return static_cast<uptr>(__builtin_ctzl(X));
}

inline uptr getLog2(uptr X) {
  DCHECK(isPowerOfTwo(X));
  return getLeastSignificantSetBitIndex(X);
}

inline u32 getRandomU32(u32 *State) {
  // ANSI C linear congruential PRNG (16-bit output).
  // return (*State = *State * 1103515245 + 12345) >> 16;
  // XorShift (32-bit output).
  *State ^= *State << 13;
  *State ^= *State >> 17;
  *State ^= *State << 5;
  return *State;
}

inline u32 getRandomModN(u32 *State, u32 N) {
  return getRandomU32(State) % N; // [0, N)
}

template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
  if (N <= 1)
    return;
  u32 State = *RandState;
  for (u32 I = N - 1; I > 0; I--)
    Swap(A[I], A[getRandomModN(&State, I + 1)]);
  *RandState = State;
}

inline void computePercentage(uptr Numerator, uptr Denominator, uptr *Integral,
                              uptr *Fractional) {
  constexpr uptr Digits = 100;
  if (Denominator == 0) {
    *Integral = 100;
    *Fractional = 0;
    return;
  }

  *Integral = Numerator * Digits / Denominator;
  *Fractional =
      (((Numerator * Digits) % Denominator) * Digits + Denominator / 2) /
      Denominator;
}

// Platform specific functions.

extern uptr PageSizeCached;
uptr getPageSizeSlow();
inline uptr getPageSizeCached() {
#if SCUDO_ANDROID && defined(PAGE_SIZE)
  // Most Android builds have a build-time constant page size.
  return PAGE_SIZE;
#endif
  if (LIKELY(PageSizeCached))
    return PageSizeCached;
  return getPageSizeSlow();
}

// Returns 0 if the number of CPUs could not be determined.
u32 getNumberOfCPUs();

const char *getEnv(const char *Name);

u64 getMonotonicTime();
// Gets the time faster but with less accuracy. Can call getMonotonicTime
// if no fast version is available.
u64 getMonotonicTimeFast();

u32 getThreadID();

// Our randomness gathering function is limited to 256 bytes to ensure we get
// as many bytes as requested, and avoid interruptions (on Linux).
constexpr uptr MaxRandomLength = 256U;
bool getRandom(void *Buffer, uptr Length, bool Blocking = false);

// Platform memory mapping functions.

#define MAP_ALLOWNOMEM (1U << 0)
#define MAP_NOACCESS (1U << 1)
#define MAP_RESIZABLE (1U << 2)
#define MAP_MEMTAG (1U << 3)
#define MAP_PRECOMMIT (1U << 4)

// Our platform memory mapping use is restricted to 3 scenarios:
// - reserve memory at a random address (MAP_NOACCESS);
// - commit memory in a previously reserved space;
// - commit memory at a random address.
// As such, only a subset of parameter combinations is valid, which is checked
// by the function implementation. The Data parameter allows passing opaque
// platform-specific data to the function.
// Returns nullptr on error, or dies if MAP_ALLOWNOMEM is not specified.
void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
          MapPlatformData *Data = nullptr);

// Indicates that we are getting rid of the whole mapping, which might have
// further consequences on Data, depending on the platform.
#define UNMAP_ALL (1U << 0)

void unmap(void *Addr, uptr Size, uptr Flags = 0,
           MapPlatformData *Data = nullptr);

void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
                         MapPlatformData *Data = nullptr);

void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
                      MapPlatformData *Data = nullptr);

// Logging related functions.

void setAbortMessage(const char *Message);

struct BlockInfo {
  uptr BlockBegin;
  uptr BlockSize;
  uptr RegionBegin;
  uptr RegionEnd;
};

enum class Option : u8 {
  ReleaseInterval,      // Release to OS interval in milliseconds.
  MemtagTuning,         // Whether to tune tagging for UAF or overflow.
  ThreadDisableMemInit, // Whether to disable automatic heap initialization
                        // and, where possible, memory tagging, on this thread.
  MaxCacheEntriesCount, // Maximum number of blocks that can be cached.
  MaxCacheEntrySize,    // Maximum size of a block that can be cached.
  MaxTSDsCount,         // Number of usable TSDs for the shared registry.
};

enum class ReleaseToOS : u8 {
  Normal,   // Follow the normal rules for releasing pages to the OS.
  Force,    // Force release pages to the OS, but avoid cases that take too
            // long.
  ForceAll, // Force release every page possible regardless of how long it
            // will take.
};

constexpr unsigned char PatternFillByte = 0xAB;

enum FillContentsMode {
  NoFill = 0,
  ZeroFill = 1,
  PatternOrZeroFill = 2 // Pattern fill unless the memory is known to be
                        // zero-initialized already.
};

} // namespace scudo

#endif // SCUDO_COMMON_H_
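getRandomU32() above is a plain XorShift32 generator, and shuffle() is a Fisher-Yates shuffle; scudo uses the pair to randomize the order in which free blocks are handed out. A self-contained sketch of the same two pieces (the names are mine, not scudo's; note that the XorShift state must be seeded non-zero, or it stays stuck at zero forever):

#include <cstdint>
#include <cstdio>

// XorShift32, as in getRandomU32() above.
uint32_t xorShift32(uint32_t *State) {
  *State ^= *State << 13;
  *State ^= *State >> 17;
  *State ^= *State << 5;
  return *State;
}

// Fisher-Yates shuffle, as in scudo::shuffle().
template <typename T> void shuffleArray(T *A, uint32_t N, uint32_t *RandState) {
  for (uint32_t I = N - 1; I > 0; I--) {
    const uint32_t J = xorShift32(RandState) % (I + 1); // uniform in [0, I]
    const T Tmp = A[I];
    A[I] = A[J];
    A[J] = Tmp;
  }
}

int main() {
  uint32_t Seed = 42; // must be non-zero for XorShift
  int Blocks[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  shuffleArray(Blocks, 8, &Seed);
  for (int B : Blocks)
    printf("%d ", B);
  printf("\n");
}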
60
Telegram/ThirdParty/scudo/condition_variable.h
vendored
@@ -1,60 +0,0 @@
//===-- condition_variable.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_CONDITION_VARIABLE_H_
#define SCUDO_CONDITION_VARIABLE_H_

#include "condition_variable_base.h"

#include "common.h"
#include "platform.h"

#include "condition_variable_linux.h"

namespace scudo {

// A dummy implementation of the condition variable interface. It doesn't do a
// real `wait`; instead it only spins for a short amount of time.
class ConditionVariableDummy
    : public ConditionVariableBase<ConditionVariableDummy> {
public:
  void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}

  void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
    M.unlock();

    constexpr u32 SpinTimes = 64;
    volatile u32 V = 0;
    for (u32 I = 0; I < SpinTimes; ++I) {
      u32 Tmp = V + 1;
      V = Tmp;
    }

    M.lock();
  }
};

template <typename Config, typename = const bool>
struct ConditionVariableState {
  static constexpr bool enabled() { return false; }
  // This is only used for compilation purposes, so that we won't end up with
  // many conditional compilations. If you want to use `ConditionVariableDummy`,
  // define `ConditionVariableT` in your allocator configuration. See
  // allocator_config.h for more details.
  using ConditionVariableT = ConditionVariableDummy;
};

template <typename Config>
struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
  static constexpr bool enabled() { return Config::UseConditionVariable; }
  using ConditionVariableT = typename Config::ConditionVariableT;
};

} // namespace scudo

#endif // SCUDO_CONDITION_VARIABLE_H_
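The two ConditionVariableState templates above are a member-detection idiom: the partial specialization is only viable when Config declares a `static const bool UseConditionVariable`, because the second template parameter defaults to `const bool` and must match `decltype(Config::UseConditionVariable)`. A stripped-down sketch of the same trick, with hypothetical config types:

#include <cstdio>

// Primary template: chosen when Config has no UseConditionVariable member.
template <typename Config, typename = const bool> struct CVState {
  static constexpr bool enabled() { return false; }
};

// Partial specialization: chosen, via SFINAE on decltype, when
// Config::UseConditionVariable exists and has type `const bool`.
template <typename Config>
struct CVState<Config, decltype(Config::UseConditionVariable)> {
  static constexpr bool enabled() { return Config::UseConditionVariable; }
};

struct PlainConfig {};
struct CVConfig {
  static const bool UseConditionVariable = true;
};

int main() {
  printf("%d %d\n", CVState<PlainConfig>::enabled(),
         CVState<CVConfig>::enabled()); // prints "0 1"
}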
56
Telegram/ThirdParty/scudo/condition_variable_base.h
vendored
@@ -1,56 +0,0 @@
//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
#define SCUDO_CONDITION_VARIABLE_BASE_H_

#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename Derived> class ConditionVariableBase {
public:
  constexpr ConditionVariableBase() = default;

  void bindTestOnly(HybridMutex &Mutex) {
#if SCUDO_DEBUG
    boundMutex = &Mutex;
#else
    (void)Mutex;
#endif
  }

  void notifyAll(HybridMutex &M) REQUIRES(M) {
#if SCUDO_DEBUG
    CHECK_EQ(&M, boundMutex);
#endif
    getDerived()->notifyAllImpl(M);
  }

  void wait(HybridMutex &M) REQUIRES(M) {
#if SCUDO_DEBUG
    CHECK_EQ(&M, boundMutex);
#endif
    getDerived()->waitImpl(M);
  }

protected:
  Derived *getDerived() { return static_cast<Derived *>(this); }

#if SCUDO_DEBUG
  // Because thread-safety analysis doesn't support pointer aliasing, we are
  // not able to mark the proper annotations without false positives. Instead,
  // we pass the lock and do the same-lock check separately.
  HybridMutex *boundMutex = nullptr;
#endif
};

} // namespace scudo

#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
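ConditionVariableBase is a CRTP base: wait() and notifyAll() dispatch statically to the derived class's waitImpl()/notifyAllImpl(), so there is no vtable or virtual call in the allocator's hot path. A toy sketch of the mechanism (the type names are illustrative, not scudo's):

#include <cstdio>

// The base resolves notifyAll() to the derived class's notifyAllImpl() at
// compile time, with no virtual dispatch.
template <typename Derived> class NotifierBase {
public:
  void notifyAll() { static_cast<Derived *>(this)->notifyAllImpl(); }
};

class PrintNotifier : public NotifierBase<PrintNotifier> {
public:
  void notifyAllImpl() { puts("notified"); }
};

int main() {
  PrintNotifier N;
  N.notifyAll(); // prints "notified"
}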
52
Telegram/ThirdParty/scudo/condition_variable_linux.cpp
vendored
@@ -1,52 +0,0 @@
//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_LINUX

#include "condition_variable_linux.h"

#include "atomic_helpers.h"

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

namespace scudo {

void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
  const u32 V = atomic_load_relaxed(&Counter);
  atomic_store_relaxed(&Counter, V + 1);

  // TODO(chiahungduan): Move the waiters from the futex waiting queue
  // `Counter` to futex waiting queue `M` so that awoken threads won't block
  // again on `M`, which is still held by the current thread.
  if (LastNotifyAll != V) {
    syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
            INT_MAX, nullptr, nullptr, 0);
  }

  LastNotifyAll = V + 1;
}

void ConditionVariableLinux::waitImpl(HybridMutex &M) {
  const u32 V = atomic_load_relaxed(&Counter) + 1;
  atomic_store_relaxed(&Counter, V);

  // TODO: Use ScopedUnlock when it's supported.
  M.unlock();
  syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
          nullptr, nullptr, 0);
  M.lock();
}

} // namespace scudo

#endif // SCUDO_LINUX
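The deleted implementation builds a condition variable out of two raw futex calls. FUTEX_WAIT only sleeps while the word still holds the expected value, which is what makes the unlock-then-wait sequence in waitImpl() race-free: a notifyAll() that bumps Counter between the unlock and the wait causes the wait to return immediately instead of losing the wakeup. A hedged sketch of just the two syscalls it relies on (Linux only; the wrapper names are mine):

#include <climits>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// Block until *Addr changes away from Expected (or a wakeup arrives). Returns
// immediately if *Addr no longer holds Expected when the kernel checks it.
static long futexWait(uint32_t *Addr, uint32_t Expected) {
  return syscall(SYS_futex, Addr, FUTEX_WAIT_PRIVATE, Expected, nullptr,
                 nullptr, 0);
}

// Wake every thread currently waiting on Addr.
static long futexWakeAll(uint32_t *Addr) {
  return syscall(SYS_futex, Addr, FUTEX_WAKE_PRIVATE, INT_MAX, nullptr,
                 nullptr, 0);
}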
38
Telegram/ThirdParty/scudo/condition_variable_linux.h
vendored
@@ -1,38 +0,0 @@
//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
#define SCUDO_CONDITION_VARIABLE_LINUX_H_

#include "platform.h"

#if SCUDO_LINUX

#include "atomic_helpers.h"
#include "condition_variable_base.h"
#include "thread_annotations.h"

namespace scudo {

class ConditionVariableLinux
    : public ConditionVariableBase<ConditionVariableLinux> {
public:
  void notifyAllImpl(HybridMutex &M) REQUIRES(M);

  void waitImpl(HybridMutex &M) REQUIRES(M);

private:
  u32 LastNotifyAll = 0;
  atomic_u32 Counter = {};
};

} // namespace scudo

#endif // SCUDO_LINUX

#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
20
Telegram/ThirdParty/scudo/crc32_hw.cpp
vendored
@@ -1,20 +0,0 @@
//===-- crc32_hw.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "checksum.h"

namespace scudo {

#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
u32 computeHardwareCRC32(u32 Crc, uptr Data) {
  return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
}
#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
       // defined(__ARM_FEATURE_CRC32)

} // namespace scudo
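On x86, the CRC32_INTRINSIC wrapped above boils down to the SSE4.2 CRC32C instruction. A standalone sketch, assuming an x86-64 host compiled with -msse4.2 (on aarch64 the analogue would be __crc32cd from <arm_acle.h>):

#include <cstdint>
#include <cstdio>
#include <nmmintrin.h> // SSE4.2 intrinsics

int main() {
  const uint64_t Value = 0x0123456789ABCDEFull;
  uint32_t Crc = 0xFFFFFFFFu; // common seed for CRC32C
  // _mm_crc32_u64 folds 8 bytes into the running CRC32C checksum.
  Crc = static_cast<uint32_t>(_mm_crc32_u64(Crc, Value));
  printf("%08x\n", Crc);
}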
76
Telegram/ThirdParty/scudo/flags.cpp
vendored
@@ -1,76 +0,0 @@
//===-- flags.cpp -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "flags.h"
#include "common.h"
#include "flags_parser.h"

#include "scudo/interface.h"

namespace scudo {

Flags *getFlags() {
  static Flags F;
  return &F;
}

void Flags::setDefaults() {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "flags.inc"
#undef SCUDO_FLAG

#ifdef GWP_ASAN_HOOKS
#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description)                 \
  GWP_ASAN_##Name = DefaultValue;
#include "gwp_asan/options.inc"
#undef GWP_ASAN_OPTION
#endif // GWP_ASAN_HOOKS
}

void registerFlags(FlagParser *Parser, Flags *F) {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description)                      \
  Parser->registerFlag(#Name, Description, FlagType::FT_##Type,                \
                       reinterpret_cast<void *>(&F->Name));
#include "flags.inc"
#undef SCUDO_FLAG

#ifdef GWP_ASAN_HOOKS
#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description)                 \
  Parser->registerFlag("GWP_ASAN_" #Name, Description, FlagType::FT_##Type,    \
                       reinterpret_cast<void *>(&F->GWP_ASAN_##Name));
#include "gwp_asan/options.inc"
#undef GWP_ASAN_OPTION
#endif // GWP_ASAN_HOOKS
}

static const char *getCompileDefinitionScudoDefaultOptions() {
#ifdef SCUDO_DEFAULT_OPTIONS
  return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
#else
  return "";
#endif
}

static const char *getScudoDefaultOptions() {
  return (&__scudo_default_options) ? __scudo_default_options() : "";
}

void initFlags() {
  Flags *F = getFlags();
  F->setDefaults();
  FlagParser Parser;
  registerFlags(&Parser, F);
  Parser.parseString(getCompileDefinitionScudoDefaultOptions());
  Parser.parseString(getScudoDefaultOptions());
  Parser.parseString(getEnv("SCUDO_OPTIONS"));
  if (const char *V = getEnv("SCUDO_ALLOCATION_RING_BUFFER_SIZE")) {
    Parser.parseStringPair("allocation_ring_buffer_size", V);
  }
}

} // namespace scudo
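flags.cpp, flags.h and flags.inc together form an X-macro: the one SCUDO_FLAG list expands into struct fields in flags.h, defaulting statements in setDefaults(), and registration calls in registerFlags(). A compact standalone sketch of the pattern; here the flag list is a macro rather than scudo's separate include file, and the flag names are made up:

#include <cstdio>

// One list of (type, name, default) triples, expanded several ways below.
#define MY_FLAGS(X)                                                            \
  X(int, release_interval_ms, 5000)                                            \
  X(bool, may_return_null, true)

struct Flags {
// Expansion 1: declare one field per flag.
#define DECLARE(Type, Name, Default) Type Name;
  MY_FLAGS(DECLARE)
#undef DECLARE

  // Expansion 2: assign every default in one function.
  void setDefaults() {
#define SET(Type, Name, Default) Name = Default;
    MY_FLAGS(SET)
#undef SET
  }
};

int main() {
  Flags F;
  F.setDefaults();
  printf("%d %d\n", F.release_interval_ms, F.may_return_null);
}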
38
Telegram/ThirdParty/scudo/flags.h
vendored
@@ -1,38 +0,0 @@
//===-- flags.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_FLAGS_H_
#define SCUDO_FLAGS_H_

#include "internal_defs.h"

namespace scudo {

struct Flags {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "flags.inc"
#undef SCUDO_FLAG

#ifdef GWP_ASAN_HOOKS
#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description)                 \
  Type GWP_ASAN_##Name;
#include "gwp_asan/options.inc"
#undef GWP_ASAN_OPTION
#endif // GWP_ASAN_HOOKS

  void setDefaults();
};

Flags *getFlags();
void initFlags();
class FlagParser;
void registerFlags(FlagParser *Parser, Flags *F);

} // namespace scudo

#endif // SCUDO_FLAGS_H_
51
Telegram/ThirdParty/scudo/flags.inc
vendored
@@ -1,51 +0,0 @@
//===-- flags.inc -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_FLAG
#error "Define SCUDO_FLAG prior to including this file!"
#endif

SCUDO_FLAG(int, quarantine_size_kb, 0,
           "Size (in kilobytes) of quarantine used to delay the actual "
           "deallocation of chunks. A lower value may reduce memory usage but "
           "decrease the effectiveness of the mitigation.")

SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
           "Size (in kilobytes) of per-thread cache used to offload the global "
           "quarantine. A lower value may reduce memory usage but might "
           "increase the contention on the global quarantine.")

SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
           "Size (in bytes) up to which chunks will be quarantined (if lower "
           "than or equal to).")

SCUDO_FLAG(bool, dealloc_type_mismatch, false,
           "Terminate on a type mismatch in allocation-deallocation functions, "
           "eg: malloc/delete, new/free, new/delete[], etc.")

SCUDO_FLAG(bool, delete_size_mismatch, true,
           "Terminate on a size mismatch between a sized-delete and the actual "
           "size of a chunk (as provided to new/new[]).")

SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")

SCUDO_FLAG(bool, pattern_fill_contents, false,
           "Pattern fill chunk contents on allocation.")

SCUDO_FLAG(bool, may_return_null, true,
           "Indicate whether the allocator should terminate instead of "
           "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
           "invalid allocation alignments, etc.")

SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
           "Interval (in milliseconds) at which to attempt release of unused "
           "memory to the OS. Negative values disable the feature.")

SCUDO_FLAG(int, allocation_ring_buffer_size, 32768,
           "Entries to keep in the allocation ring buffer for scudo. "
           "Values less than or equal to zero disable the buffer.")
178
Telegram/ThirdParty/scudo/flags_parser.cpp
vendored
@@ -1,178 +0,0 @@
//===-- flags_parser.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "flags_parser.h"
#include "common.h"
#include "report.h"

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

namespace scudo {

class UnknownFlagsRegistry {
  static const u32 MaxUnknownFlags = 16;
  const char *UnknownFlagsNames[MaxUnknownFlags];
  u32 NumberOfUnknownFlags;

public:
  void add(const char *Name) {
    CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
    UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
  }

  void report() {
    if (!NumberOfUnknownFlags)
      return;
    Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
           NumberOfUnknownFlags);
    for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
      Printf("    %s\n", UnknownFlagsNames[I]);
    NumberOfUnknownFlags = 0;
  }
};
static UnknownFlagsRegistry UnknownFlags;

void reportUnrecognizedFlags() { UnknownFlags.report(); }

void FlagParser::printFlagDescriptions() {
  Printf("Available flags for Scudo:\n");
  for (u32 I = 0; I < NumberOfFlags; ++I)
    Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
}

static bool isSeparator(char C) {
  return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
         C == '\r';
}

static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }

void FlagParser::skipWhitespace() {
  while (isSeparator(Buffer[Pos]))
    ++Pos;
}

void FlagParser::parseFlag() {
  const uptr NameStart = Pos;
  while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
    ++Pos;
  if (Buffer[Pos] != '=')
    reportError("expected '='");
  const char *Name = Buffer + NameStart;
  const uptr ValueStart = ++Pos;
  const char *Value;
  if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
    const char Quote = Buffer[Pos++];
    while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
      ++Pos;
    if (Buffer[Pos] == 0)
      reportError("unterminated string");
    Value = Buffer + ValueStart + 1;
    ++Pos; // consume the closing quote
  } else {
    while (!isSeparatorOrNull(Buffer[Pos]))
      ++Pos;
    Value = Buffer + ValueStart;
  }
  if (!runHandler(Name, Value, '='))
    reportError("flag parsing failed.");
}

void FlagParser::parseFlags() {
  while (true) {
    skipWhitespace();
    if (Buffer[Pos] == 0)
      break;
    parseFlag();
  }
}

void FlagParser::parseString(const char *S) {
  if (!S)
    return;
  // Backup current parser state to allow nested parseString() calls.
  const char *OldBuffer = Buffer;
  const uptr OldPos = Pos;
  Buffer = S;
  Pos = 0;

  parseFlags();

  Buffer = OldBuffer;
  Pos = OldPos;
}

inline bool parseBool(const char *Value, bool *b) {
  if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
      strncmp(Value, "false", 5) == 0) {
    *b = false;
    return true;
  }
  if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
      strncmp(Value, "true", 4) == 0) {
    *b = true;
    return true;
  }
  return false;
}

void FlagParser::parseStringPair(const char *Name, const char *Value) {
  if (!runHandler(Name, Value, '\0'))
    reportError("flag parsing failed.");
}

bool FlagParser::runHandler(const char *Name, const char *Value,
                            const char Sep) {
  for (u32 I = 0; I < NumberOfFlags; ++I) {
    const uptr Len = strlen(Flags[I].Name);
    if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != Sep)
      continue;
    bool Ok = false;
    switch (Flags[I].Type) {
    case FlagType::FT_bool:
      Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
      if (!Ok)
        reportInvalidFlag("bool", Value);
      break;
    case FlagType::FT_int:
      char *ValueEnd;
      errno = 0;
      long V = strtol(Value, &ValueEnd, 10);
      if (errno != 0 ||                 // strtol failed (over or underflow)
          V > INT_MAX || V < INT_MIN || // overflows integer
          // contains unexpected characters
          (*ValueEnd != '"' && *ValueEnd != '\'' &&
           !isSeparatorOrNull(*ValueEnd))) {
        reportInvalidFlag("int", Value);
        break;
      }
      *reinterpret_cast<int *>(Flags[I].Var) = static_cast<int>(V);
      Ok = true;
      break;
    }
    return Ok;
  }
  // Unrecognized flag. This is not a fatal error, we may print a warning later.
  UnknownFlags.add(Name);
  return true;
}

void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
                              void *Var) {
  CHECK_LT(NumberOfFlags, MaxFlags);
  Flags[NumberOfFlags].Name = Name;
  Flags[NumberOfFlags].Desc = Desc;
  Flags[NumberOfFlags].Type = Type;
  Flags[NumberOfFlags].Var = Var;
  ++NumberOfFlags;
}

} // namespace scudo
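Options reach this parser as `name=value` pairs separated by spaces, commas, colons, or newlines (see isSeparator above), typically via the SCUDO_OPTIONS environment variable. The FT_int branch of runHandler() shows the careful way to use strtol: check errno for overflow, range-check against int, and reject trailing garbage. The same validation in a standalone sketch, with a stricter terminator check than the parser (which also accepts separators and quotes):

#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>

bool parseIntFlag(const char *Value, int *Out) {
  char *End;
  errno = 0;
  const long V = strtol(Value, &End, 10);
  if (errno != 0 ||                 // strtol over/underflowed
      V > INT_MAX || V < INT_MIN || // out of int range
      *End != '\0')                 // trailing garbage after the digits
    return false;
  *Out = static_cast<int>(V);
  return true;
}

int main() {
  int Out;
  printf("%d\n", parseIntFlag("32768", &Out)); // 1
  printf("%d\n", parseIntFlag("32k", &Out));   // 0 (trailing garbage)
}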
56
Telegram/ThirdParty/scudo/flags_parser.h
vendored
@@ -1,56 +0,0 @@
//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_FLAGS_PARSER_H_
#define SCUDO_FLAGS_PARSER_H_

#include "report.h"
#include "string_utils.h"

#include <stddef.h>

namespace scudo {

enum class FlagType : u8 {
  FT_bool,
  FT_int,
};

class FlagParser {
public:
  void registerFlag(const char *Name, const char *Desc, FlagType Type,
                    void *Var);
  void parseString(const char *S);
  void printFlagDescriptions();
  void parseStringPair(const char *Name, const char *Value);

private:
  static const u32 MaxFlags = 20;
  struct Flag {
    const char *Name;
    const char *Desc;
    FlagType Type;
    void *Var;
  } Flags[MaxFlags];

  u32 NumberOfFlags = 0;
  const char *Buffer = nullptr;
  uptr Pos = 0;

  void reportFatalError(const char *Error);
  void skipWhitespace();
  void parseFlags();
  void parseFlag();
  bool runHandler(const char *Name, const char *Value, char Sep);
};

void reportUnrecognizedFlags();

} // namespace scudo

#endif // SCUDO_FLAGS_PARSER_H_
237
Telegram/ThirdParty/scudo/fuchsia.cpp
vendored
@@ -1,237 +0,0 @@
//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_FUCHSIA

#include "common.h"
#include "mutex.h"
#include "string_utils.h"

#include <lib/sync/mutex.h> // for sync_mutex_t
#include <stdlib.h>         // for getenv()
#include <zircon/compiler.h>
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

namespace scudo {

uptr getPageSize() { return _zx_system_get_page_size(); }

void NORETURN die() { __builtin_trap(); }

// We zero-initialize the Extra parameter of map(), so make sure this is
// consistent with ZX_HANDLE_INVALID.
static_assert(ZX_HANDLE_INVALID == 0, "");

static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
                                uptr Size) {
  char Error[128];
  formatString(Error, sizeof(Error),
               "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
               Size >> 10, zx_status_get_string(Status));
  outputRaw(Error);
  die();
}

static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
  // Only scenario so far.
  DCHECK(Data);
  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);

  const zx_status_t Status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      Size, &Data->Vmar, &Data->VmarBase);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_allocate", Size);
    return nullptr;
  }
  return reinterpret_cast<void *>(Data->VmarBase);
}

void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
          MapPlatformData *Data) {
  DCHECK_EQ(Size % getPageSizeCached(), 0);
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);

  // For MAP_NOACCESS, just allocate a Vmar and return.
  if (Flags & MAP_NOACCESS)
    return allocateVmar(Size, Data, AllowNoMem);

  const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
                               ? Data->Vmar
                               : _zx_vmar_root_self();

  zx_status_t Status;
  zx_handle_t Vmo;
  uint64_t VmoSize = 0;
  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
    // If a Vmo was specified, it's a resize operation.
    CHECK(Addr);
    DCHECK(Flags & MAP_RESIZABLE);
    Vmo = Data->Vmo;
    VmoSize = Data->VmoSize;
    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
    if (Status != ZX_OK) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnError(Status, "zx_vmo_set_size", VmoSize + Size);
      return nullptr;
    }
  } else {
    // Otherwise, create a Vmo and set its name.
    Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
    if (UNLIKELY(Status != ZX_OK)) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnError(Status, "zx_vmo_create", Size);
      return nullptr;
    }
    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
  }

  uintptr_t P;
  zx_vm_option_t MapFlags =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
  if (Addr)
    DCHECK(Data);
  const uint64_t Offset =
      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
  if (Offset)
    MapFlags |= ZX_VM_SPECIFIC;
  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_map", Size);
    return nullptr;
  }

  if (Flags & MAP_PRECOMMIT) {
    Status = _zx_vmar_op_range(Vmar, ZX_VMAR_OP_COMMIT, P, Size,
                               /*buffer=*/nullptr, /*buffer_size=*/0);
  }

  // No need to track the Vmo if we don't intend to resize it. Close it.
  if (Flags & MAP_RESIZABLE) {
    DCHECK(Data);
    if (Data->Vmo == ZX_HANDLE_INVALID)
      Data->Vmo = Vmo;
    else
      DCHECK_EQ(Data->Vmo, Vmo);
  } else {
    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
  }
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_op_range", Size);
    return nullptr;
  }

  if (Data)
    Data->VmoSize += Size;

  return reinterpret_cast<void *>(P);
}

void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
  if (Flags & UNMAP_ALL) {
    DCHECK_NE(Data, nullptr);
    const zx_handle_t Vmar = Data->Vmar;
    DCHECK_NE(Vmar, _zx_vmar_root_self());
    // Destroying the vmar effectively unmaps the whole mapping.
    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
  } else {
    const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
                                 ? Data->Vmar
                                 : _zx_vmar_root_self();
    const zx_status_t Status =
        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
    if (UNLIKELY(Status != ZX_OK))
      dieOnError(Status, "zx_vmar_unmap", Size);
  }
  if (Data) {
    if (Data->Vmo != ZX_HANDLE_INVALID)
      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
    memset(Data, 0, sizeof(*Data));
  }
}

void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
                         UNUSED MapPlatformData *Data) {
  const zx_vm_option_t Prot =
      (Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  const zx_status_t Status = _zx_vmar_protect(Data->Vmar, Prot, Addr, Size);
  if (Status != ZX_OK)
    dieOnError(Status, "zx_vmar_protect", Size);
}

void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
                      MapPlatformData *Data) {
  // TODO: DCHECK the BaseAddress is consistent with the data in
  // MapPlatformData.
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
  const zx_status_t Status =
      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
  CHECK_EQ(Status, ZX_OK);
}

const char *getEnv(const char *Name) { return getenv(Name); }

// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
// because the Fuchsia implementation of sync_mutex_t has clang thread safety
// annotations. Were we to apply proper capability annotations to the top level
// HybridMutex class itself, they would not be needed. As it stands, the
// thread analysis thinks that we are locking the mutex and accidentally
// leaving it locked on the way out.
bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  // Size and alignment must be compatible between both types.
  return sync_mutex_trylock(&M) == ZX_OK;
}

void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_lock(&M);
}

void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_unlock(&M);
}

void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}

u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
u64 getMonotonicTimeFast() { return _zx_clock_get_monotonic(); }

u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }

u32 getThreadID() { return 0; }

bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
  static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
    return false;
  _zx_cprng_draw(Buffer, Length);
  return true;
}

void outputRaw(const char *Buffer) {
  __sanitizer_log_write(Buffer, strlen(Buffer));
}

void setAbortMessage(const char *Message) {}

} // namespace scudo

#endif // SCUDO_FUCHSIA
32
Telegram/ThirdParty/scudo/fuchsia.h
vendored
@@ -1,32 +0,0 @@
//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_FUCHSIA_H_
#define SCUDO_FUCHSIA_H_

#include "platform.h"

#if SCUDO_FUCHSIA

#include <stdint.h>
#include <zircon/types.h>

namespace scudo {

struct MapPlatformData {
  zx_handle_t Vmar;
  zx_handle_t Vmo;
  uintptr_t VmarBase;
  uint64_t VmoSize;
};

} // namespace scudo

#endif // SCUDO_FUCHSIA

#endif // SCUDO_FUCHSIA_H_
12
Telegram/ThirdParty/scudo/fuzz/CMakeLists.txt
vendored
@@ -1,12 +0,0 @@
if (LLVM_USE_SANITIZE_COVERAGE)
  add_executable(get_error_info_fuzzer
    get_error_info_fuzzer.cpp)
  set_target_properties(
    get_error_info_fuzzer PROPERTIES FOLDER "Fuzzers")
  target_compile_options(
    get_error_info_fuzzer PRIVATE -fsanitize=fuzzer)
  set_target_properties(
    get_error_info_fuzzer PROPERTIES LINK_FLAGS -fsanitize=fuzzer)
  target_include_directories(
    get_error_info_fuzzer PRIVATE .. ../include)
endif()
56
Telegram/ThirdParty/scudo/fuzz/get_error_info_fuzzer.cpp
vendored
@@ -1,56 +0,0 @@
//===-- get_error_info_fuzzer.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#define SCUDO_FUZZ
#include "allocator_config.h"
#include "combined.h"

#include <fuzzer/FuzzedDataProvider.h>

#include <string>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
  using AllocatorT = scudo::Allocator<scudo::AndroidConfig>;
  FuzzedDataProvider FDP(Data, Size);

  uintptr_t FaultAddr = FDP.ConsumeIntegral<uintptr_t>();
  uintptr_t MemoryAddr = FDP.ConsumeIntegral<uintptr_t>();

  std::string MemoryAndTags =
      FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
  const char *Memory = MemoryAndTags.c_str();
  // Assume 16-byte alignment.
  size_t MemorySize = (MemoryAndTags.length() / 17) * 16;
  const char *MemoryTags = Memory + MemorySize;

  std::string StackDepotBytes =
      FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
  std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
  for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
       ++i) {
    StackDepot[i] = StackDepotBytes[i];
  }

  std::string RegionInfoBytes =
      FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
  std::vector<char> RegionInfo(AllocatorT::getRegionInfoArraySize(), 0);
  for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size();
       ++i) {
    RegionInfo[i] = RegionInfoBytes[i];
  }

  std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();

  scudo_error_info ErrorInfo;
  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
                           RegionInfo.data(), RingBufferBytes.data(),
                           RingBufferBytes.size(), Memory, MemoryTags,
                           MemoryAddr, MemorySize);
  return 0;
}
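For reference, the minimal shape of a libFuzzer target like the one deleted above; it is the -fsanitize=fuzzer link (see the CMake rules) that supplies main() and repeatedly drives this callback. The scudo fuzzer slices its input into the fault address, memory image, stack depot, region info and ring buffer via FuzzedDataProvider:

#include <cstddef>
#include <cstdint>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  if (Size < 4) // reject inputs too small to be interesting
    return 0;
  // Feed Data/Size into the API under test here.
  return 0; // non-crashing inputs always return 0
}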
182
Telegram/ThirdParty/scudo/include/scudo/interface.h
vendored
@@ -1,182 +0,0 @@
//===-- scudo/interface.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_INTERFACE_H_
#define SCUDO_INTERFACE_H_

#include <stddef.h>
#include <stdint.h>

extern "C" {

__attribute__((weak)) const char *__scudo_default_options(void);

// Post-allocation & pre-deallocation hooks.
__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size);
__attribute__((weak)) void __scudo_deallocate_hook(void *ptr);

// `realloc` involves both a deallocation and an allocation, but they are not
// reported atomically. A consumer that takes a snapshot right in the middle of
// `realloc`, between the deallocation and allocation reports, may be confused
// by memory that appears to be missing. To alleviate that case, define the two
// `realloc` hooks below to be informed that the bundled hook calls belong to
// one `realloc`. These hooks are optional and should only be used when a hooks
// user wants to track reallocs more closely.
//
// See more details in the comment of `realloc` in wrapper_c.inc.
__attribute__((weak)) void
__scudo_realloc_allocate_hook(void *old_ptr, void *new_ptr, size_t size);
__attribute__((weak)) void __scudo_realloc_deallocate_hook(void *old_ptr);

void __scudo_print_stats(void);

typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);

// Determine the likely cause of a tag check fault or other memory protection
// error on a system with memory tagging support. The results are returned via
// the error_info data structure. Up to three possible causes are returned in
// the reports array, in decreasing order of probability. The remaining
// elements of reports are zero-initialized.
//
// This function may be called from a different process from the one that
// crashed. In this case, various data structures must be copied from the
// crashing process to the process that analyzes the crash.
//
// This interface is not guaranteed to be stable and may change at any time.
// Furthermore, the version of scudo in the crashing process must be the same
// as the version in the process that analyzes the crash.
//
// fault_addr is the fault address. On aarch64 this is available in the system
// register FAR_ELx, or siginfo.si_addr in Linux 5.11 or above. This address
// must include the pointer tag; this is available if SA_EXPOSE_TAGBITS was set
// in sigaction.sa_flags when the signal handler was registered. Note that the
// kernel strips the tag from the field sigcontext.fault_address, so this
// address is not suitable to be passed as fault_addr.
//
// stack_depot is a pointer to the stack depot data structure, which may be
// obtained by calling the function __scudo_get_stack_depot_addr() in the
// crashing process. The size of the stack depot is available by calling the
// function __scudo_get_stack_depot_size().
//
// region_info is a pointer to the region info data structure, which may be
// obtained by calling the function __scudo_get_region_info_addr() in the
// crashing process. The size of the region info is available by calling the
// function __scudo_get_region_info_size().
//
// memory is a pointer to a region of memory surrounding the fault address.
// The more memory available via this pointer, the more likely it is that the
// function will be able to analyze a crash correctly. It is recommended to
// provide an amount of memory equal to 16 * the primary allocator's largest
// size class either side of the fault address.
//
// memory_tags is a pointer to an array of memory tags for the memory accessed
// via memory. Each byte of this array corresponds to a region of memory of
// size equal to the architecturally defined memory tag granule size (16 on
// aarch64).
//
// memory_addr is the start address of memory in the crashing process's address
// space.
//
// memory_size is the size of the memory region referred to by the memory
// pointer.
void __scudo_get_error_info(struct scudo_error_info *error_info,
                            uintptr_t fault_addr, const char *stack_depot,
                            size_t stack_depot_size, const char *region_info,
                            const char *ring_buffer, size_t ring_buffer_size,
                            const char *memory, const char *memory_tags,
                            uintptr_t memory_addr, size_t memory_size);

enum scudo_error_type {
  UNKNOWN,
  USE_AFTER_FREE,
  BUFFER_OVERFLOW,
  BUFFER_UNDERFLOW,
};

struct scudo_error_report {
  enum scudo_error_type error_type;

  uintptr_t allocation_address;
  uintptr_t allocation_size;

  uint32_t allocation_tid;
  uintptr_t allocation_trace[64];

  uint32_t deallocation_tid;
  uintptr_t deallocation_trace[64];
};

struct scudo_error_info {
  struct scudo_error_report reports[3];
};

const char *__scudo_get_stack_depot_addr(void);
size_t __scudo_get_stack_depot_size(void);

const char *__scudo_get_region_info_addr(void);
size_t __scudo_get_region_info_size(void);

const char *__scudo_get_ring_buffer_addr(void);
size_t __scudo_get_ring_buffer_size(void);

#ifndef M_DECAY_TIME
#define M_DECAY_TIME -100
#endif

#ifndef M_PURGE
#define M_PURGE -101
#endif

#ifndef M_PURGE_ALL
#define M_PURGE_ALL -104
#endif

// Tune the allocator's choice of memory tags to make it more likely that
// a certain class of memory errors will be detected. The value argument should
// be one of the M_MEMTAG_TUNING_* constants below.
#ifndef M_MEMTAG_TUNING
#define M_MEMTAG_TUNING -102
#endif

// Per-thread memory initialization tuning. The value argument should be one of:
// 1: Disable automatic heap initialization and, where possible, memory
//    tagging, on this thread.
// 0: Normal behavior.
#ifndef M_THREAD_DISABLE_MEM_INIT
#define M_THREAD_DISABLE_MEM_INIT -103
#endif

#ifndef M_CACHE_COUNT_MAX
#define M_CACHE_COUNT_MAX -200
#endif

#ifndef M_CACHE_SIZE_MAX
#define M_CACHE_SIZE_MAX -201
#endif

#ifndef M_TSDS_COUNT_MAX
#define M_TSDS_COUNT_MAX -202
#endif

// Tune for buffer overflows.
#ifndef M_MEMTAG_TUNING_BUFFER_OVERFLOW
#define M_MEMTAG_TUNING_BUFFER_OVERFLOW 0
#endif

// Tune for use-after-free.
#ifndef M_MEMTAG_TUNING_UAF
#define M_MEMTAG_TUNING_UAF 1
#endif

// Print internal stats to the log.
#ifndef M_LOG_STATS
#define M_LOG_STATS -205
#endif

} // extern "C"

#endif // SCUDO_INTERFACE_H_
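Since __scudo_default_options is declared weak, an embedding application can define it to bake in its own defaults; flags.cpp above parses its return value before the SCUDO_OPTIONS environment variable, so the environment still wins. A sketch, with arbitrary example values:

// Link this into the binary that embeds scudo; the strong definition
// overrides the weak declaration in scudo/interface.h.
extern "C" const char *__scudo_default_options() {
  return "dealloc_type_mismatch=true:release_to_os_interval_ms=2000";
}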
166
Telegram/ThirdParty/scudo/internal_defs.h
vendored
@@ -1,166 +0,0 @@
//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_INTERNAL_DEFS_H_
#define SCUDO_INTERNAL_DEFS_H_

#include "platform.h"

#include <stdint.h>

#ifndef SCUDO_DEBUG
#define SCUDO_DEBUG 0
#endif

#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))

// String related macros.

#define STRINGIFY_(S) #S
#define STRINGIFY(S) STRINGIFY_(S)
#define CONCATENATE_(S, C) S##C
#define CONCATENATE(S, C) CONCATENATE_(S, C)

// Attributes & builtins related macros.

#define INTERFACE __attribute__((visibility("default")))
#define HIDDEN __attribute__((visibility("hidden")))
#define WEAK __attribute__((weak))
#define ALWAYS_INLINE inline __attribute__((always_inline))
#define ALIAS(X) __attribute__((alias(X)))
#define FORMAT(F, A) __attribute__((format(printf, F, A)))
#define NOINLINE __attribute__((noinline))
#define NORETURN __attribute__((noreturn))
#define LIKELY(X) __builtin_expect(!!(X), 1)
#define UNLIKELY(X) __builtin_expect(!!(X), 0)
#if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(X) generates prefetchnt0 on x86
#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
#else
#define PREFETCH(X) __builtin_prefetch(X)
#endif
#define UNUSED __attribute__((unused))
#define USED __attribute__((used))
#define NOEXCEPT noexcept

// This check is only available on Clang. This is essentially an alias of
// C++20's 'constinit' specifier which will take care of this when (if?) we can
// ask all libcs that use Scudo to compile us with C++20. Dynamic
// initialization is bad; Scudo is designed to be lazily initialized on the
// first call to malloc/free (and friends), and this generally happens in the
// loader somewhere in libdl's init. After the loader is done, control is
// transferred to libc's initialization, and the dynamic initializers are run.
// If there's a dynamic initializer for Scudo, then it will clobber the
// already-initialized Scudo, and re-initialize all its members back to default
// values, causing various explosions. Unfortunately, marking
// scudo::Allocator<>'s constructor as 'constexpr' isn't sufficient to prevent
// dynamic initialization, as default initialization is fine under 'constexpr'
// (but not 'constinit'). Clang at -O0, and gcc at all opt levels will emit a
// dynamic initializer for any constant-initialized variables if there is a mix
// of default-initialized and constant-initialized variables.
//
// If you're looking at this because your build failed, you probably introduced
// a new member to scudo::Allocator<> (possibly transiently) that didn't have an
// initializer. The fix is easy - just add one.
#if defined(__has_attribute)
#if __has_attribute(require_constant_initialization)
#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION                                  \
  __attribute__((__require_constant_initialization__))
#else
#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION
#endif
#endif

namespace scudo {

typedef uintptr_t uptr;
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef intptr_t sptr;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;

// The following two functions have platform specific implementations.
void outputRaw(const char *Buffer);
void NORETURN die();

#define RAW_CHECK_MSG(Expr, Msg)                                               \
  do {                                                                         \
    if (UNLIKELY(!(Expr))) {                                                   \
      outputRaw(Msg);                                                          \
      die();                                                                   \
    }                                                                          \
  } while (false)

#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)

void NORETURN reportCheckFailed(const char *File, int Line,
                                const char *Condition, u64 Value1, u64 Value2);
#define CHECK_IMPL(C1, Op, C2)                                                 \
  do {                                                                         \
    if (UNLIKELY(!(C1 Op C2))) {                                               \
      scudo::reportCheckFailed(__FILE__, __LINE__, #C1 " " #Op " " #C2,        \
                               (scudo::u64)C1, (scudo::u64)C2);                \
      scudo::die();                                                            \
    }                                                                          \
  } while (false)

#define CHECK(A) CHECK_IMPL((A), !=, 0)
#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))

#if SCUDO_DEBUG
#define DCHECK(A) CHECK(A)
#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
#define DCHECK_NE(A, B) CHECK_NE(A, B)
#define DCHECK_LT(A, B) CHECK_LT(A, B)
#define DCHECK_LE(A, B) CHECK_LE(A, B)
#define DCHECK_GT(A, B) CHECK_GT(A, B)
#define DCHECK_GE(A, B) CHECK_GE(A, B)
#else
#define DCHECK(A)                                                              \
  do {                                                                         \
  } while (false && (A))
#define DCHECK_EQ(A, B)                                                        \
  do {                                                                         \
  } while (false && (A) == (B))
#define DCHECK_NE(A, B)                                                        \
  do {                                                                         \
  } while (false && (A) != (B))
#define DCHECK_LT(A, B)                                                        \
  do {                                                                         \
  } while (false && (A) < (B))
#define DCHECK_LE(A, B)                                                        \
  do {                                                                         \
  } while (false && (A) <= (B))
#define DCHECK_GT(A, B)                                                        \
  do {                                                                         \
  } while (false && (A) > (B))
#define DCHECK_GE(A, B)                                                        \
  do {                                                                         \
  } while (false && (A) >= (B))
#endif

// The superfluous die() call effectively makes this macro NORETURN.
#define UNREACHABLE(Msg)                                                       \
  do {                                                                         \
    CHECK(0 && Msg);                                                           \
    die();                                                                     \
  } while (0)

} // namespace scudo

#endif // SCUDO_INTERNAL_DEFS_H_
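Note: the constant-initialization pitfall documented above is easy to reproduce in isolation. A minimal sketch (hypothetical Config type, not Scudo code) of what the attribute enforces:

// Hypothetical example of the constant-initialization requirement.
#if defined(__has_attribute)
#if __has_attribute(require_constant_initialization)
#define REQUIRE_CONSTINIT __attribute__((__require_constant_initialization__))
#endif
#endif
#ifndef REQUIRE_CONSTINIT
#define REQUIRE_CONSTINIT
#endif

struct Config {
  int Flags = 0; // constant-initialized member
  int Verbosity; // default-initialized member; mixing the two is what can
                 // make gcc (and clang at -O0) emit a dynamic initializer
  constexpr Config() : Verbosity(0) {} // initializing every member fixes it
};

// With the attribute, the compiler rejects this variable unless it can be
// fully constant-initialized, i.e. no runtime constructor code may run.
REQUIRE_CONSTINIT static Config GlobalConfig;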
242
Telegram/ThirdParty/scudo/linux.cpp
vendored
@@ -1,242 +0,0 @@
//===-- linux.cpp -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_LINUX

#include "common.h"
#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
#include "report_linux.h"
#include "string_utils.h"

#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#if SCUDO_ANDROID
#include <sys/prctl.h>
// Definitions of prctl arguments to set a vma name in Android kernels.
#define ANDROID_PR_SET_VMA 0x53564d41
#define ANDROID_PR_SET_VMA_ANON_NAME 0
#endif

namespace scudo {

uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }

void NORETURN die() { abort(); }

// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
          UNUSED MapPlatformData *Data) {
  int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
  int MmapProt;
  if (Flags & MAP_NOACCESS) {
    MmapFlags |= MAP_NORESERVE;
    MmapProt = PROT_NONE;
  } else {
    MmapProt = PROT_READ | PROT_WRITE;
  }
#if defined(__aarch64__)
#ifndef PROT_MTE
#define PROT_MTE 0x20
#endif
  if (Flags & MAP_MEMTAG)
    MmapProt |= PROT_MTE;
#endif
  if (Addr)
    MmapFlags |= MAP_FIXED;
  void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
  if (P == MAP_FAILED) {
    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
      reportMapError(errno == ENOMEM ? Size : 0);
    return nullptr;
  }
#if SCUDO_ANDROID
  if (Name)
    prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
#endif
  return P;
}

// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
           UNUSED MapPlatformData *Data) {
  if (munmap(Addr, Size) != 0)
    reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
}

// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
                         UNUSED MapPlatformData *Data) {
  int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
  if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
    reportProtectError(Addr, Size, Prot);
}

// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
                      UNUSED MapPlatformData *Data) {
  void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);

  while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
  }
}

// Calling getenv should be fine (c)(tm) at any time.
const char *getEnv(const char *Name) { return getenv(Name); }

namespace {
enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
}

bool HybridMutex::tryLock() {
  return atomic_compare_exchange_strong(&M, Unlocked, Locked,
                                        memory_order_acquire) == Unlocked;
}

// The following is based on https://akkadia.org/drepper/futex.pdf.
void HybridMutex::lockSlow() {
  u32 V = atomic_compare_exchange_strong(&M, Unlocked, Locked,
                                         memory_order_acquire);
  if (V == Unlocked)
    return;
  if (V != Sleeping)
    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
  while (V != Unlocked) {
    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
            nullptr, nullptr, 0);
    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
  }
}

void HybridMutex::unlock() {
  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
    atomic_store(&M, Unlocked, memory_order_release);
    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
            nullptr, nullptr, 0);
  }
}

void HybridMutex::assertHeldImpl() {
  CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
}

u64 getMonotonicTime() {
  timespec TS;
  clock_gettime(CLOCK_MONOTONIC, &TS);
  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
         static_cast<u64>(TS.tv_nsec);
}

u64 getMonotonicTimeFast() {
#if defined(CLOCK_MONOTONIC_COARSE)
  timespec TS;
  clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
         static_cast<u64>(TS.tv_nsec);
#else
  return getMonotonicTime();
#endif
}

u32 getNumberOfCPUs() {
  cpu_set_t CPUs;
  // sched_getaffinity can fail for a variety of legitimate reasons (lack of
  // CAP_SYS_NICE, syscall filtering, etc), in which case we shall return 0.
  if (sched_getaffinity(0, sizeof(cpu_set_t), &CPUs) != 0)
    return 0;
  return static_cast<u32>(CPU_COUNT(&CPUs));
}

u32 getThreadID() {
#if SCUDO_ANDROID
  return static_cast<u32>(gettid());
#else
  return static_cast<u32>(syscall(SYS_gettid));
#endif
}

// Blocking is possibly unused if the getrandom block is not compiled in.
bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
  if (!Buffer || !Length || Length > MaxRandomLength)
    return false;
  ssize_t ReadBytes;
#if defined(SYS_getrandom)
#if !defined(GRND_NONBLOCK)
#define GRND_NONBLOCK 1
#endif
  // Up to 256 bytes, getrandom will not be interrupted.
  ReadBytes =
      syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
  if (ReadBytes == static_cast<ssize_t>(Length))
    return true;
#endif // defined(SYS_getrandom)
  // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
  // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
  const int FileDesc = open("/dev/urandom", O_RDONLY);
  if (FileDesc == -1)
    return false;
  ReadBytes = read(FileDesc, Buffer, Length);
  close(FileDesc);
  return (ReadBytes == static_cast<ssize_t>(Length));
}

// Allocation free syslog-like API.
extern "C" WEAK int async_safe_write_log(int pri, const char *tag,
                                         const char *msg);

void outputRaw(const char *Buffer) {
  if (&async_safe_write_log) {
    constexpr s32 AndroidLogInfo = 4;
    constexpr uptr MaxLength = 1024U;
    char LocalBuffer[MaxLength];
    while (strlen(Buffer) > MaxLength) {
      uptr P;
      for (P = MaxLength - 1; P > 0; P--) {
        if (Buffer[P] == '\n') {
          memcpy(LocalBuffer, Buffer, P);
          LocalBuffer[P] = '\0';
          async_safe_write_log(AndroidLogInfo, "scudo", LocalBuffer);
          Buffer = &Buffer[P + 1];
          break;
        }
      }
      // If no newline was found, just log the buffer.
      if (P == 0)
        break;
    }
    async_safe_write_log(AndroidLogInfo, "scudo", Buffer);
  } else {
    (void)write(2, Buffer, strlen(Buffer));
  }
}

extern "C" WEAK void android_set_abort_message(const char *);

void setAbortMessage(const char *Message) {
  if (&android_set_abort_message)
    android_set_abort_message(Message);
}

} // namespace scudo

#endif // SCUDO_LINUX
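Note: HybridMutex::lockSlow()/unlock() above implement the three-state protocol from Drepper's futex paper: Unlocked becomes Locked on the uncontended path, and Sleeping once any waiter exists, so unlock() only issues FUTEX_WAKE when someone may be blocked. A standalone sketch of the same protocol using C++11 atomics (hypothetical FutexLock type; Scudo's real mutex additionally spins before sleeping):

// Linux-only sketch of Drepper's futex mutex; not Scudo code.
#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

class FutexLock {
  std::atomic<unsigned> M{0}; // 0 = unlocked, 1 = locked, 2 = sleeping
public:
  void lock() {
    unsigned Expected = 0;
    if (M.compare_exchange_strong(Expected, 1, std::memory_order_acquire))
      return; // fast path, no contention
    // Mark the lock contended, then wait until the previous owner releases.
    while (M.exchange(2, std::memory_order_acquire) != 0)
      syscall(SYS_futex, &M, FUTEX_WAIT_PRIVATE, 2, nullptr, nullptr, 0);
  }
  void unlock() {
    // If the value was 2, a waiter may be asleep and needs a wake-up.
    if (M.exchange(0, std::memory_order_release) == 2)
      syscall(SYS_futex, &M, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
  }
};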
25
Telegram/ThirdParty/scudo/linux.h
vendored
@@ -1,25 +0,0 @@
//===-- linux.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LINUX_H_
#define SCUDO_LINUX_H_

#include "platform.h"

#if SCUDO_LINUX

namespace scudo {

// MapPlatformData is unused on Linux, define it as a minimally sized structure.
struct MapPlatformData {};

} // namespace scudo

#endif // SCUDO_LINUX

#endif // SCUDO_LINUX_H_
240
Telegram/ThirdParty/scudo/list.h
vendored
@@ -1,240 +0,0 @@
//===-- list.h --------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LIST_H_
#define SCUDO_LIST_H_

#include "internal_defs.h"

namespace scudo {

// Intrusive POD singly and doubly linked list.
// An object with all zero fields should represent a valid empty list. clear()
// should be called on all non-zero-initialized objects before using.

template <class T> class IteratorBase {
public:
  explicit IteratorBase(T *CurrentT) : Current(CurrentT) {}
  IteratorBase &operator++() {
    Current = Current->Next;
    return *this;
  }
  bool operator!=(IteratorBase Other) const { return Current != Other.Current; }
  T &operator*() { return *Current; }

private:
  T *Current;
};

template <class T> struct IntrusiveList {
  bool empty() const { return Size == 0; }
  uptr size() const { return Size; }

  T *front() { return First; }
  const T *front() const { return First; }
  T *back() { return Last; }
  const T *back() const { return Last; }

  void clear() {
    First = Last = nullptr;
    Size = 0;
  }

  typedef IteratorBase<T> Iterator;
  typedef IteratorBase<const T> ConstIterator;

  Iterator begin() { return Iterator(First); }
  Iterator end() { return Iterator(nullptr); }

  ConstIterator begin() const { return ConstIterator(First); }
  ConstIterator end() const { return ConstIterator(nullptr); }

  void checkConsistency() const;

protected:
  uptr Size = 0;
  T *First = nullptr;
  T *Last = nullptr;
};

template <class T> void IntrusiveList<T>::checkConsistency() const {
  if (Size == 0) {
    CHECK_EQ(First, nullptr);
    CHECK_EQ(Last, nullptr);
  } else {
    uptr Count = 0;
    for (T *I = First;; I = I->Next) {
      Count++;
      if (I == Last)
        break;
    }
    CHECK_EQ(this->size(), Count);
    CHECK_EQ(Last->Next, nullptr);
  }
}

template <class T> struct SinglyLinkedList : public IntrusiveList<T> {
  using IntrusiveList<T>::First;
  using IntrusiveList<T>::Last;
  using IntrusiveList<T>::Size;
  using IntrusiveList<T>::empty;

  void push_back(T *X) {
    X->Next = nullptr;
    if (empty())
      First = X;
    else
      Last->Next = X;
    Last = X;
    Size++;
  }

  void push_front(T *X) {
    if (empty())
      Last = X;
    X->Next = First;
    First = X;
    Size++;
  }

  void pop_front() {
    DCHECK(!empty());
    First = First->Next;
    if (!First)
      Last = nullptr;
    Size--;
  }

  // Insert X next to Prev.
  void insert(T *Prev, T *X) {
    DCHECK(!empty());
    DCHECK_NE(Prev, nullptr);
    DCHECK_NE(X, nullptr);
    X->Next = Prev->Next;
    Prev->Next = X;
    if (Last == Prev)
      Last = X;
    ++Size;
  }

  void extract(T *Prev, T *X) {
    DCHECK(!empty());
    DCHECK_NE(Prev, nullptr);
    DCHECK_NE(X, nullptr);
    DCHECK_EQ(Prev->Next, X);
    Prev->Next = X->Next;
    if (Last == X)
      Last = Prev;
    Size--;
  }

  void append_back(SinglyLinkedList<T> *L) {
    DCHECK_NE(this, L);
    if (L->empty())
      return;
    if (empty()) {
      *this = *L;
    } else {
      Last->Next = L->First;
      Last = L->Last;
      Size += L->size();
    }
    L->clear();
  }
};

template <class T> struct DoublyLinkedList : IntrusiveList<T> {
  using IntrusiveList<T>::First;
  using IntrusiveList<T>::Last;
  using IntrusiveList<T>::Size;
  using IntrusiveList<T>::empty;

  void push_front(T *X) {
    X->Prev = nullptr;
    if (empty()) {
      Last = X;
    } else {
      DCHECK_EQ(First->Prev, nullptr);
      First->Prev = X;
    }
    X->Next = First;
    First = X;
    Size++;
  }

  // Inserts X before Y.
  void insert(T *X, T *Y) {
    if (Y == First)
      return push_front(X);
    T *Prev = Y->Prev;
    // This is a hard CHECK to ensure consistency in the event of an intentional
    // corruption of Y->Prev, to prevent a potential write-{4,8}.
    CHECK_EQ(Prev->Next, Y);
    Prev->Next = X;
    X->Prev = Prev;
    X->Next = Y;
    Y->Prev = X;
    Size++;
  }

  void push_back(T *X) {
    X->Next = nullptr;
    if (empty()) {
      First = X;
    } else {
      DCHECK_EQ(Last->Next, nullptr);
      Last->Next = X;
    }
    X->Prev = Last;
    Last = X;
    Size++;
  }

  void pop_front() {
    DCHECK(!empty());
    First = First->Next;
    if (!First)
      Last = nullptr;
    else
      First->Prev = nullptr;
    Size--;
  }

  // The consistency of the adjacent links is aggressively checked in order to
  // catch potential corruption attempts, that could yield a mirrored
  // write-{4,8} primitive. nullptr checks are deemed less vital.
  void remove(T *X) {
    T *Prev = X->Prev;
    T *Next = X->Next;
    if (Prev) {
      CHECK_EQ(Prev->Next, X);
      Prev->Next = Next;
    }
    if (Next) {
      CHECK_EQ(Next->Prev, X);
      Next->Prev = Prev;
    }
    if (First == X) {
      DCHECK_EQ(Prev, nullptr);
      First = Next;
    } else {
      DCHECK_NE(Prev, nullptr);
    }
    if (Last == X) {
      DCHECK_EQ(Next, nullptr);
      Last = Prev;
    } else {
      DCHECK_NE(Next, nullptr);
    }
    Size--;
  }
};

} // namespace scudo

#endif // SCUDO_LIST_H_
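Note: the lists are intrusive, so the element type itself must provide the link fields (Next, plus Prev for DoublyLinkedList) and no allocation happens in the list itself. A minimal usage sketch with a hypothetical node type:

// Hypothetical POD node; assumes "list.h" from above is included.
struct Node {
  Node *Next; // required by SinglyLinkedList
  Node *Prev; // additionally required by DoublyLinkedList
  int Value;
};

void example() {
  static Node A = {}, B = {};
  A.Value = 1;
  B.Value = 2;

  scudo::SinglyLinkedList<Node> List;
  List.clear(); // per the header comment for non-zero-initialized objects
  List.push_back(&A);
  List.push_front(&B); // list is now B -> A
  for (Node &N : List) // IteratorBase provides begin()/end() iteration
    (void)N.Value;
  List.pop_front(); // removes B; A is now both front() and back()
}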
189
Telegram/ThirdParty/scudo/local_cache.h
vendored
@@ -1,189 +0,0 @@
//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initCache();
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      // Refill half of the number of max cached.
      DCHECK_GT(C->MaxCount / 2, 0U);
      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }

  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];

    // If the cache is full, drain half of the blocks back to the main
    // allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }

  void drain() {
    // Drain BatchClassId last as it may be needed while draining normal blocks.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }

  void *getBatchClassBlock() {
    void *B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return B;
  }

  LocalStats &getStats() { return Stats; }

  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initCache() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // Note that the string utils don't support printing u16 thus we cast it
      // to a common use type uptr.
      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append("    No block is cached.\n");
  }

  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }

  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }

  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
    const u16 NumBlocksRefilled =
        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
    DCHECK_LE(NumBlocksRefilled, MaxRefill);
    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
    return NumBlocksRefilled != 0;
  }

  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_
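Note: allocate() refills MaxCount / 2 blocks when the per-class cache is empty and deallocate() drains MaxCount / 2 when it is full, a hysteresis that keeps a thread alternating between one malloc and one free from bouncing blocks to the global allocator on every call. A toy model (not Scudo code) of just that counting behavior:

// Toy model of the refill/drain hysteresis; the real cache moves compact
// pointers, this only tracks the count for one size class.
#include <cstdio>

int main() {
  const int MaxCount = 8;
  int Count = 0; // blocks currently cached for this size class
  auto Allocate = [&] {
    if (Count == 0)
      Count += MaxCount / 2; // refill half of the max cached
    --Count;
  };
  auto Deallocate = [&] {
    if (Count == MaxCount)
      Count -= MaxCount / 2; // drain half back to the global allocator
    ++Count;
  };
  Allocate();   // triggers one refill of 4; Count == 3
  Deallocate(); // Count == 4, well away from both thresholds
  std::printf("cached: %d\n", Count);
}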
84
Telegram/ThirdParty/scudo/mem_map.cpp
vendored
@@ -1,84 +0,0 @@
//===-- mem_map.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mem_map.h"

#include "common.h"

namespace scudo {

bool MemMapDefault::mapImpl(uptr Addr, uptr Size, const char *Name,
                            uptr Flags) {
  void *MappedAddr =
      ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
  if (MappedAddr == nullptr)
    return false;
  Base = reinterpret_cast<uptr>(MappedAddr);
  MappedBase = Base;
  Capacity = Size;
  return true;
}

void MemMapDefault::unmapImpl(uptr Addr, uptr Size) {
  if (Size == Capacity) {
    Base = MappedBase = Capacity = 0;
  } else {
    if (Base == Addr) {
      Base = Addr + Size;
      MappedBase = MappedBase == 0 ? Base : Max(MappedBase, Base);
    }
    Capacity -= Size;
  }

  ::scudo::unmap(reinterpret_cast<void *>(Addr), Size, UNMAP_ALL, &Data);
}

bool MemMapDefault::remapImpl(uptr Addr, uptr Size, const char *Name,
                              uptr Flags) {
  void *RemappedPtr =
      ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
  const uptr RemappedAddr = reinterpret_cast<uptr>(RemappedPtr);
  MappedBase = MappedBase == 0 ? RemappedAddr : Min(MappedBase, RemappedAddr);
  return RemappedAddr == Addr;
}

void MemMapDefault::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
  DCHECK_NE(MappedBase, 0U);
  DCHECK_GE(From, MappedBase);
  return ::scudo::releasePagesToOS(MappedBase, From - MappedBase, Size, &Data);
}

void MemMapDefault::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
  return ::scudo::setMemoryPermission(Addr, Size, Flags);
}

void ReservedMemoryDefault::releaseImpl() {
  ::scudo::unmap(reinterpret_cast<void *>(Base), Capacity, UNMAP_ALL, &Data);
}

bool ReservedMemoryDefault::createImpl(uptr Addr, uptr Size, const char *Name,
                                       uptr Flags) {
  void *Reserved = ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name,
                                Flags | MAP_NOACCESS, &Data);
  if (Reserved == nullptr)
    return false;

  Base = reinterpret_cast<uptr>(Reserved);
  Capacity = Size;

  return true;
}

ReservedMemoryDefault::MemMapT ReservedMemoryDefault::dispatchImpl(uptr Addr,
                                                                   uptr Size) {
  ReservedMemoryDefault::MemMapT NewMap(Addr, Size);
  NewMap.setMapPlatformData(Data);
  return NewMap;
}

} // namespace scudo
92
Telegram/ThirdParty/scudo/mem_map.h
vendored
@@ -1,92 +0,0 @@
//===-- mem_map.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEM_MAP_H_
#define SCUDO_MEM_MAP_H_

#include "mem_map_base.h"

#include "common.h"
#include "internal_defs.h"

// TODO: This is only used for `MapPlatformData`. Remove these includes when we
// have all three platform specific `MemMap` and `ReservedMemory`
// implementations.
#include "fuchsia.h"
#include "linux.h"
#include "trusty.h"

#include "mem_map_fuchsia.h"
#include "mem_map_linux.h"

namespace scudo {

// This will be deprecated when every allocator has been supported by each
// platform's `MemMap` implementation.
class MemMapDefault final : public MemMapBase<MemMapDefault> {
public:
  constexpr MemMapDefault() = default;
  MemMapDefault(uptr Base, uptr Capacity) : Base(Base), Capacity(Capacity) {}

  // Impls for base functions.
  bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
  void unmapImpl(uptr Addr, uptr Size);
  bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
  void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
  void releasePagesToOSImpl(uptr From, uptr Size) {
    return releaseAndZeroPagesToOSImpl(From, Size);
  }
  void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
  uptr getBaseImpl() { return Base; }
  uptr getCapacityImpl() { return Capacity; }

  void setMapPlatformData(MapPlatformData &NewData) { Data = NewData; }

private:
  uptr Base = 0;
  uptr Capacity = 0;
  uptr MappedBase = 0;
  MapPlatformData Data = {};
};

// This will be deprecated when every allocator has been supported by each
// platform's `MemMap` implementation.
class ReservedMemoryDefault final
    : public ReservedMemory<ReservedMemoryDefault, MemMapDefault> {
public:
  constexpr ReservedMemoryDefault() = default;

  bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
  void releaseImpl();
  MemMapT dispatchImpl(uptr Addr, uptr Size);
  uptr getBaseImpl() { return Base; }
  uptr getCapacityImpl() { return Capacity; }

private:
  uptr Base = 0;
  uptr Capacity = 0;
  MapPlatformData Data = {};
};

#if SCUDO_LINUX
using ReservedMemoryT = ReservedMemoryLinux;
using MemMapT = ReservedMemoryT::MemMapT;
#elif SCUDO_FUCHSIA
using ReservedMemoryT = ReservedMemoryFuchsia;
using MemMapT = ReservedMemoryT::MemMapT;
#elif SCUDO_TRUSTY
using ReservedMemoryT = ReservedMemoryDefault;
using MemMapT = ReservedMemoryT::MemMapT;
#else
#error \
    "Unsupported platform, please implement the ReservedMemory for your platform!"
#endif

} // namespace scudo

#endif // SCUDO_MEM_MAP_H_
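Note: the platform-selected MemMapT alias gives callers one uniform handle. A rough sketch of driving it, assuming a translation unit that includes mem_map.h on Linux (MAP_ALLOWNOMEM is Scudo's flag from common.h; the size is arbitrary):

// Hedged sketch; error handling limited to the boolean results.
bool exampleRegion() {
  scudo::MemMapT Map;
  const scudo::uptr Size = 4 * 4096;
  if (!Map.map(/*Addr=*/0, Size, "scudo:example", MAP_ALLOWNOMEM))
    return false; // Addr was only a suggestion; mapping may still fail
  Map.releasePagesToOS(Map.getBase(), Size); // return physical pages
  Map.unmap(Map.getBase(), Size); // a full unmap invalidates the handle
  return true;
}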
129
Telegram/ThirdParty/scudo/mem_map_base.h
vendored
@@ -1,129 +0,0 @@
//===-- mem_map_base.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEM_MAP_BASE_H_
#define SCUDO_MEM_MAP_BASE_H_

#include "common.h"

namespace scudo {

// In Scudo, every memory operation will be fulfilled through a
// platform-specific `MemMap` instance. The essential APIs are listed in the
// `MemMapBase` below. This is implemented in CRTP, so for each implementation,
// it has to implement all of the 'Impl' named functions.
template <class Derived> class MemMapBase {
public:
  constexpr MemMapBase() = default;

  // This is used to map a new set of contiguous pages. Note that the `Addr` is
  // only a suggestion to the system.
  bool map(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
    DCHECK(!isAllocated());
    return invokeImpl(&Derived::mapImpl, Addr, Size, Name, Flags);
  }

  // This is used to unmap partial/full pages from the beginning or the end.
  // I.e., the result pages are expected to be still contiguous.
  void unmap(uptr Addr, uptr Size) {
    DCHECK(isAllocated());
    DCHECK((Addr == getBase()) || (Addr + Size == getBase() + getCapacity()));
    invokeImpl(&Derived::unmapImpl, Addr, Size);
  }

  // This is used to remap a mapped range (either from map() or dispatched from
  // ReservedMemory). For example, we have reserved several pages and then we
  // want to remap them with different accessibility.
  bool remap(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
    DCHECK(isAllocated());
    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
    return invokeImpl(&Derived::remapImpl, Addr, Size, Name, Flags);
  }

  // This is used to update the pages' access permission. For example, mark
  // pages as no read/write permission.
  void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
    DCHECK(isAllocated());
    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
    return invokeImpl(&Derived::setMemoryPermissionImpl, Addr, Size, Flags);
  }

  // Suggest releasing a set of contiguous physical pages back to the OS. Note
  // that only physical pages are supposed to be released. Any release of
  // virtual pages may lead to undefined behavior.
  void releasePagesToOS(uptr From, uptr Size) {
    DCHECK(isAllocated());
    DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
    invokeImpl(&Derived::releasePagesToOSImpl, From, Size);
  }
  // This is similar to the above one except that any subsequent access to the
  // released pages will return with zero-filled pages.
  void releaseAndZeroPagesToOS(uptr From, uptr Size) {
    DCHECK(isAllocated());
    DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
    invokeImpl(&Derived::releaseAndZeroPagesToOSImpl, From, Size);
  }

  uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
  uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }

  bool isAllocated() { return getBase() != 0U; }

protected:
  template <typename R, typename... Args>
  R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
    return (static_cast<Derived *>(this)->*MemFn)(args...);
  }
};

// `ReservedMemory` is a special memory handle which can be viewed as a page
// allocator. `ReservedMemory` will reserve a contiguous range of pages and a
// later page request can be fulfilled at the designated address. This is used
// when we want to ensure the virtual address of the MemMap will be in a known
// range. This is implemented in CRTP, so for each implementation, it has to
// implement all of the 'Impl' named functions.
template <class Derived, typename MemMapTy> class ReservedMemory {
public:
  using MemMapT = MemMapTy;
  constexpr ReservedMemory() = default;

  // Reserve a chunk of memory at a suggested address.
  bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
    DCHECK(!isCreated());
    return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags);
  }

  // Release the entire reserved memory.
  void release() {
    DCHECK(isCreated());
    invokeImpl(&Derived::releaseImpl);
  }

  // Dispatch a sub-range of reserved memory. Note that any fragmentation of
  // the reserved pages is managed by each implementation.
  MemMapT dispatch(uptr Addr, uptr Size) {
    DCHECK(isCreated());
    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
    return invokeImpl(&Derived::dispatchImpl, Addr, Size);
  }

  uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
  uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }

  bool isCreated() { return getBase() != 0U; }

protected:
  template <typename R, typename... Args>
  R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
    return (static_cast<Derived *>(this)->*MemFn)(args...);
  }
};

} // namespace scudo

#endif // SCUDO_MEM_MAP_BASE_H_
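Note: invokeImpl() is ordinary CRTP static dispatch: the base class casts `this` to the derived type and invokes the member-function pointer, so no virtual table is involved and a missing *Impl function fails at compile time. A self-contained sketch of the pattern with hypothetical names:

// Minimal CRTP sketch of the invokeImpl dispatch used above (hypothetical
// Greeter/LoudGreeter names; nothing here is Scudo code).
#include <cstdio>

template <class Derived> class Greeter {
public:
  void greet(const char *Who) { invokeImpl(&Derived::greetImpl, Who); }

protected:
  template <typename R, typename... Args>
  R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
    // Static dispatch: the cast is safe because Derived inherits from
    // Greeter<Derived>, and no virtual call is emitted.
    return (static_cast<Derived *>(this)->*MemFn)(args...);
  }
};

class LoudGreeter final : public Greeter<LoudGreeter> {
public:
  void greetImpl(const char *Who) { std::printf("HELLO, %s!\n", Who); }
};

int main() {
  LoudGreeter G;
  G.greet("scudo"); // resolves to LoudGreeter::greetImpl at compile time
}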
252
Telegram/ThirdParty/scudo/mem_map_fuchsia.cpp
vendored
@@ -1,252 +0,0 @@
//===-- mem_map_fuchsia.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mem_map_fuchsia.h"

#include "atomic_helpers.h"
#include "common.h"
#include "string_utils.h"

#if SCUDO_FUCHSIA

#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

namespace scudo {

static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
                                uptr Size) {
  char Error[128];
  formatString(Error, sizeof(Error),
               "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
               Size >> 10, _zx_status_get_string(Status));
  outputRaw(Error);
  die();
}

static void setVmoName(zx_handle_t Vmo, const char *Name) {
  size_t Len = strlen(Name);
  DCHECK_LT(Len, ZX_MAX_NAME_LEN);
  zx_status_t Status = _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, Len);
  CHECK_EQ(Status, ZX_OK);
}

// Returns the (cached) base address of the root VMAR.
static uptr getRootVmarBase() {
  static atomic_uptr CachedResult = {0};

  uptr Result = atomic_load(&CachedResult, memory_order_acquire);
  if (UNLIKELY(!Result)) {
    zx_info_vmar_t VmarInfo;
    zx_status_t Status =
        _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &VmarInfo,
                            sizeof(VmarInfo), nullptr, nullptr);
    CHECK_EQ(Status, ZX_OK);
    CHECK_NE(VmarInfo.base, 0);

    atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
    Result = VmarInfo.base;
  }

  return Result;
}

// Lazily creates and then always returns the same zero-sized VMO.
static zx_handle_t getPlaceholderVmo() {
  static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};

  zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
  if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
    // Create a zero-sized placeholder VMO.
    zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
    if (UNLIKELY(Status != ZX_OK))
      dieOnError(Status, "zx_vmo_create", 0);

    setVmoName(Vmo, "scudo:reserved");

    // Atomically store its handle. If some other thread wins the race, use its
    // handle and discard ours.
    zx_handle_t OldValue = atomic_compare_exchange_strong(
        &StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
    if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
      Status = _zx_handle_close(Vmo);
      CHECK_EQ(Status, ZX_OK);

      Vmo = OldValue;
    }
  }

  return Vmo;
}

MemMapFuchsia::MemMapFuchsia(uptr Base, uptr Capacity)
    : MapAddr(Base), WindowBase(Base), WindowSize(Capacity) {
  // Create the VMO.
  zx_status_t Status = _zx_vmo_create(Capacity, 0, &Vmo);
  if (UNLIKELY(Status != ZX_OK))
    dieOnError(Status, "zx_vmo_create", Capacity);
}

bool MemMapFuchsia::mapImpl(UNUSED uptr Addr, uptr Size, const char *Name,
                            uptr Flags) {
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
  const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
  const bool NoAccess = !!(Flags & MAP_NOACCESS);

  // Create the VMO.
  zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmo_create", Size);
    return false;
  }

  if (Name != nullptr)
    setVmoName(Vmo, Name);

  // Map it.
  zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS;
  if (!NoAccess)
    MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
  Status =
      _zx_vmar_map(_zx_vmar_root_self(), MapFlags, 0, Vmo, 0, Size, &MapAddr);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_map", Size);

    Status = _zx_handle_close(Vmo);
    CHECK_EQ(Status, ZX_OK);

    MapAddr = 0;
    Vmo = ZX_HANDLE_INVALID;
    return false;
  }

  if (PreCommit) {
    Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
                               Size, nullptr, 0);
    CHECK_EQ(Status, ZX_OK);
  }

  WindowBase = MapAddr;
  WindowSize = Size;
  return true;
}

void MemMapFuchsia::unmapImpl(uptr Addr, uptr Size) {
  zx_status_t Status;

  if (Size == WindowSize) {
    // NOTE: Closing first and then unmapping seems slightly faster than doing
    // the same operations in the opposite order.
    Status = _zx_handle_close(Vmo);
    CHECK_EQ(Status, ZX_OK);
    Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
    CHECK_EQ(Status, ZX_OK);

    MapAddr = WindowBase = WindowSize = 0;
    Vmo = ZX_HANDLE_INVALID;
  } else {
    // Unmap the subrange.
    Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
    CHECK_EQ(Status, ZX_OK);

    // Decommit the pages that we just unmapped.
    Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, Addr - MapAddr, Size,
                              nullptr, 0);
    CHECK_EQ(Status, ZX_OK);

    if (Addr == WindowBase)
      WindowBase += Size;
    WindowSize -= Size;
  }
}

bool MemMapFuchsia::remapImpl(uptr Addr, uptr Size, const char *Name,
                              uptr Flags) {
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
  const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
  const bool NoAccess = !!(Flags & MAP_NOACCESS);

  // NOTE: This will rename the *whole* VMO, not only the requested portion of
  // it. But we cannot do better than this given the MemMap API. In practice,
  // the upper layers of Scudo always pass the same Name for a given MemMap.
  if (Name != nullptr)
    setVmoName(Vmo, Name);

  uptr MappedAddr;
  zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS | ZX_VM_SPECIFIC_OVERWRITE;
  if (!NoAccess)
    MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
  zx_status_t Status =
      _zx_vmar_map(_zx_vmar_root_self(), MapFlags, Addr - getRootVmarBase(),
                   Vmo, Addr - MapAddr, Size, &MappedAddr);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_map", Size);
    return false;
  }
  DCHECK_EQ(Addr, MappedAddr);

  if (PreCommit) {
    Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
                               Size, nullptr, 0);
    CHECK_EQ(Status, ZX_OK);
  }

  return true;
}

void MemMapFuchsia::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
  zx_status_t Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, From - MapAddr,
                                        Size, nullptr, 0);
  CHECK_EQ(Status, ZX_OK);
}

void MemMapFuchsia::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
  const bool NoAccess = !!(Flags & MAP_NOACCESS);

  zx_vm_option_t MapFlags = 0;
  if (!NoAccess)
    MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
  zx_status_t Status =
      _zx_vmar_protect(_zx_vmar_root_self(), MapFlags, Addr, Size);
  CHECK_EQ(Status, ZX_OK);
}

bool ReservedMemoryFuchsia::createImpl(UNUSED uptr Addr, uptr Size,
                                       UNUSED const char *Name, uptr Flags) {
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);

  // Reserve memory by mapping the placeholder VMO without any permission.
  zx_status_t Status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_ALLOW_FAULTS, 0,
                                    getPlaceholderVmo(), 0, Size, &Base);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_map", Size);
    return false;
  }

  Capacity = Size;
  return true;
}

void ReservedMemoryFuchsia::releaseImpl() {
  zx_status_t Status = _zx_vmar_unmap(_zx_vmar_root_self(), Base, Capacity);
  CHECK_EQ(Status, ZX_OK);
}

ReservedMemoryFuchsia::MemMapT ReservedMemoryFuchsia::dispatchImpl(uptr Addr,
                                                                   uptr Size) {
  return ReservedMemoryFuchsia::MemMapT(Addr, Size);
}

} // namespace scudo

#endif // SCUDO_FUCHSIA
75
Telegram/ThirdParty/scudo/mem_map_fuchsia.h
vendored
75
Telegram/ThirdParty/scudo/mem_map_fuchsia.h
vendored
|
@ -1,75 +0,0 @@
|
||||||
//===-- mem_map_fuchsia.h ---------------------------------------*- C++ -*-===//
|
|
||||||
//
|
|
||||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
||||||
// See https://llvm.org/LICENSE.txt for license information.
|
|
||||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
||||||
//
|
|
||||||
//===----------------------------------------------------------------------===//
|
|
||||||
|
|
||||||
#ifndef SCUDO_MEM_MAP_FUCHSIA_H_
|
|
||||||
#define SCUDO_MEM_MAP_FUCHSIA_H_
|
|
||||||
|
|
||||||
#include "mem_map_base.h"
|
|
||||||
|
|
||||||
#if SCUDO_FUCHSIA
|
|
||||||
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <zircon/types.h>
|
|
||||||
|
|
||||||
namespace scudo {
|
|
||||||
|
|
||||||
class MemMapFuchsia final : public MemMapBase<MemMapFuchsia> {
|
|
||||||
public:
|
|
||||||
constexpr MemMapFuchsia() = default;
|
|
||||||
|
|
||||||
// Impls for base functions.
|
|
||||||
bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
|
|
||||||
void unmapImpl(uptr Addr, uptr Size);
|
|
||||||
bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
|
|
||||||
void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
|
|
||||||
void releasePagesToOSImpl(uptr From, uptr Size) {
|
|
||||||
return releaseAndZeroPagesToOSImpl(From, Size);
|
|
||||||
}
|
|
||||||
void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
|
|
||||||
uptr getBaseImpl() { return WindowBase; }
|
|
||||||
uptr getCapacityImpl() { return WindowSize; }
|
|
||||||
|
|
||||||
private:
|
|
||||||
friend class ReservedMemoryFuchsia;
|
|
||||||
|
|
||||||
// Used by ReservedMemoryFuchsia::dispatch.
|
|
||||||
MemMapFuchsia(uptr Base, uptr Capacity);
|
|
||||||
|
|
||||||
// Virtual memory address corresponding to VMO offset 0.
|
|
||||||
uptr MapAddr = 0;
|
|
||||||
|
|
||||||
// Virtual memory base address and size of the VMO subrange that is still in
|
|
||||||
// use. unmapImpl() can shrink this range, either at the beginning or at the
|
|
||||||
// end.
|
|
||||||
uptr WindowBase = 0;
|
|
||||||
uptr WindowSize = 0;
|
|
||||||
|
|
||||||
zx_handle_t Vmo = ZX_HANDLE_INVALID;
|
|
||||||
};
|
|
||||||
|
|
||||||
class ReservedMemoryFuchsia final
|
|
||||||
: public ReservedMemory<ReservedMemoryFuchsia, MemMapFuchsia> {
|
|
||||||
public:
|
|
||||||
constexpr ReservedMemoryFuchsia() = default;
|
|
||||||
|
|
||||||
bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
|
|
||||||
void releaseImpl();
|
|
||||||
MemMapT dispatchImpl(uptr Addr, uptr Size);
|
|
||||||
uptr getBaseImpl() { return Base; }
|
|
||||||
uptr getCapacityImpl() { return Capacity; }
|
|
||||||
|
|
||||||
private:
|
|
||||||
uptr Base = 0;
|
|
||||||
uptr Capacity = 0;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace scudo
|
|
||||||
|
|
||||||
#endif // SCUDO_FUCHSIA
|
|
||||||
|
|
||||||
#endif // SCUDO_MEM_MAP_FUCHSIA_H_
153 Telegram/ThirdParty/scudo/mem_map_linux.cpp vendored
@@ -1,153 +0,0 @@
//===-- mem_map_linux.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_LINUX

#include "mem_map_linux.h"

#include "common.h"
#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
#include "report_linux.h"
#include "string_utils.h"

#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#if SCUDO_ANDROID
// TODO(chiahungduan): Review if we still need the following macros.
#include <sys/prctl.h>
// Definitions of prctl arguments to set a vma name in Android kernels.
#define ANDROID_PR_SET_VMA 0x53564d41
#define ANDROID_PR_SET_VMA_ANON_NAME 0
#endif

namespace scudo {

static void *mmapWrapper(uptr Addr, uptr Size, const char *Name, uptr Flags) {
  int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
  int MmapProt;
  if (Flags & MAP_NOACCESS) {
    MmapFlags |= MAP_NORESERVE;
    MmapProt = PROT_NONE;
  } else {
    MmapProt = PROT_READ | PROT_WRITE;
  }
#if defined(__aarch64__)
#ifndef PROT_MTE
#define PROT_MTE 0x20
#endif
  if (Flags & MAP_MEMTAG)
    MmapProt |= PROT_MTE;
#endif
  if (Addr)
    MmapFlags |= MAP_FIXED;
  void *P =
      mmap(reinterpret_cast<void *>(Addr), Size, MmapProt, MmapFlags, -1, 0);
  if (P == MAP_FAILED) {
    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
      reportMapError(errno == ENOMEM ? Size : 0);
    return nullptr;
  }
#if SCUDO_ANDROID
  if (Name)
    prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
#else
  (void)Name;
#endif

  return P;
}

bool MemMapLinux::mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags) {
  void *P = mmapWrapper(Addr, Size, Name, Flags);
  if (P == nullptr)
    return false;

  MapBase = reinterpret_cast<uptr>(P);
  MapCapacity = Size;
  return true;
}

void MemMapLinux::unmapImpl(uptr Addr, uptr Size) {
  // If we unmap all the pages, also mark `MapBase` to 0 to indicate invalid
  // status.
  if (Size == MapCapacity) {
    MapBase = MapCapacity = 0;
  } else {
    // This is a partial unmap that removes pages from the beginning, so
    // shift `MapBase` to the new base.
    if (MapBase == Addr)
      MapBase = Addr + Size;
    MapCapacity -= Size;
  }

  if (munmap(reinterpret_cast<void *>(Addr), Size) != 0)
    reportUnmapError(Addr, Size);
}

bool MemMapLinux::remapImpl(uptr Addr, uptr Size, const char *Name,
                            uptr Flags) {
  void *P = mmapWrapper(Addr, Size, Name, Flags);
  if (reinterpret_cast<uptr>(P) != Addr)
    reportMapError();
  return true;
}

void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
  int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
  if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
    reportProtectError(Addr, Size, Prot);
}

void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
  void *Addr = reinterpret_cast<void *>(From);

  while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
  }
}

bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
                                     uptr Flags) {
  ReservedMemoryLinux::MemMapT MemMap;
  if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
    return false;

  MapBase = MemMap.getBase();
  MapCapacity = MemMap.getCapacity();

  return true;
}

void ReservedMemoryLinux::releaseImpl() {
  if (munmap(reinterpret_cast<void *>(getBase()), getCapacity()) != 0)
    reportUnmapError(getBase(), getCapacity());
}

ReservedMemoryLinux::MemMapT ReservedMemoryLinux::dispatchImpl(uptr Addr,
                                                               uptr Size) {
  return ReservedMemoryLinux::MemMapT(Addr, Size);
}

} // namespace scudo

#endif // SCUDO_LINUX
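Everything above funnels through mmapWrapper(): a MAP_NOACCESS reservation becomes a PROT_NONE + MAP_NORESERVE mapping, and committing is just a permission change. A standalone hedged sketch of that reserve-then-commit dance in raw POSIX, with no scudo types (function name illustrative):

// Hedged sketch, not from the diff: the reserve-then-commit pattern used by
// mmapWrapper()/ReservedMemoryLinux above, expressed with raw POSIX calls.
#include <stddef.h>
#include <sys/mman.h>

static void *reserveThenCommit(size_t Size) {
  // Reserve: inaccessible, no commit charge (mirrors MAP_NOACCESS).
  void *P = mmap(nullptr, Size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (P == MAP_FAILED)
    return nullptr;
  // Commit: flip permissions in place (mirrors setMemoryPermissionImpl).
  if (mprotect(P, Size, PROT_READ | PROT_WRITE) != 0) {
    munmap(P, Size);
    return nullptr;
  }
  return P;
}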
67 Telegram/ThirdParty/scudo/mem_map_linux.h vendored
@@ -1,67 +0,0 @@
//===-- mem_map_linux.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEM_MAP_LINUX_H_
#define SCUDO_MEM_MAP_LINUX_H_

#include "platform.h"

#if SCUDO_LINUX

#include "common.h"
#include "mem_map_base.h"

namespace scudo {

class MemMapLinux final : public MemMapBase<MemMapLinux> {
public:
  constexpr MemMapLinux() = default;
  MemMapLinux(uptr Base, uptr Capacity)
      : MapBase(Base), MapCapacity(Capacity) {}

  // Impls for base functions.
  bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
  void unmapImpl(uptr Addr, uptr Size);
  bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
  void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
  void releasePagesToOSImpl(uptr From, uptr Size) {
    return releaseAndZeroPagesToOSImpl(From, Size);
  }
  void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
  uptr getBaseImpl() { return MapBase; }
  uptr getCapacityImpl() { return MapCapacity; }

private:
  uptr MapBase = 0;
  uptr MapCapacity = 0;
};

// This will be deprecated when every allocator has been supported by each
// platform's `MemMap` implementation.
class ReservedMemoryLinux final
    : public ReservedMemory<ReservedMemoryLinux, MemMapLinux> {
public:
  // The following two are the Impls for functions in `MemMapBase`.
  uptr getBaseImpl() { return MapBase; }
  uptr getCapacityImpl() { return MapCapacity; }

  // These three are specific to `ReservedMemory`.
  bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
  void releaseImpl();
  MemMapT dispatchImpl(uptr Addr, uptr Size);

private:
  uptr MapBase = 0;
  uptr MapCapacity = 0;
};

} // namespace scudo

#endif // SCUDO_LINUX

#endif // SCUDO_MEM_MAP_LINUX_H_
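Both mem_map headers share the same CRTP shape: MemMapBase<Derived> (defined in mem_map_base.h, which is not in this diff) forwards each public call to the derived class's *Impl method, so platform selection costs no virtual dispatch. A minimal hedged sketch of the pattern, with invented names:

// Hedged sketch of the CRTP dispatch used by MemMapBase/ReservedMemory above;
// `Base`/`LinuxImpl` here are illustrative, not the real scudo definitions.
template <class Derived> class Base {
public:
  bool map(unsigned long Addr, unsigned long Size) {
    // Static dispatch: resolved at compile time, no vtable.
    return static_cast<Derived *>(this)->mapImpl(Addr, Size);
  }
};

class LinuxImpl final : public Base<LinuxImpl> {
public:
  bool mapImpl(unsigned long Addr, unsigned long Size) {
    (void)Addr;
    (void)Size;
    return true; // Real code would call mmap() here.
  }
};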
335 Telegram/ThirdParty/scudo/memtag.h vendored
@@ -1,335 +0,0 @@
//===-- memtag.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEMTAG_H_
#define SCUDO_MEMTAG_H_

#include "internal_defs.h"

#if SCUDO_CAN_USE_MTE
#include <sys/auxv.h>
#include <sys/prctl.h>
#endif

namespace scudo {

#if (__clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)) || \
    defined(SCUDO_FUZZ)

// We assume that Top-Byte Ignore is enabled if the architecture supports memory
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
// HWASan uses the top byte for its own purpose and Scudo should not touch it.
#if SCUDO_CAN_USE_MTE && !defined(SCUDO_DISABLE_TBI) && \
    !__has_feature(hwaddress_sanitizer)
inline constexpr bool archSupportsMemoryTagging() { return true; }
#else
inline constexpr bool archSupportsMemoryTagging() { return false; }
#endif

inline constexpr uptr archMemoryTagGranuleSize() { return 16; }

inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }

inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }

#else

inline constexpr bool archSupportsMemoryTagging() { return false; }

inline NORETURN uptr archMemoryTagGranuleSize() {
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN uptr untagPointer(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN uint8_t extractTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

#if __clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)

#if SCUDO_CAN_USE_MTE

inline bool systemSupportsMemoryTagging() {
#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)
#endif
  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
}

inline bool systemDetectsMemoryTagFaultsTestOnly() {
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 54
#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT 1
#endif
#ifndef PR_MTE_TAG_SHIFT
#define PR_MTE_TAG_SHIFT 3
#endif
#ifndef PR_MTE_TCF_NONE
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_MASK
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#endif
  int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  if (res == -1)
    return false;
  return (static_cast<unsigned long>(res) & PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}

inline void enableSystemMemoryTaggingTestOnly() {
  prctl(PR_SET_TAGGED_ADDR_CTRL,
        PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
        0, 0, 0);
}

#else // !SCUDO_CAN_USE_MTE

inline bool systemSupportsMemoryTagging() { return false; }

inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN void enableSystemMemoryTaggingTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

#endif // SCUDO_CAN_USE_MTE

class ScopedDisableMemoryTagChecks {
  uptr PrevTCO;

public:
  ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        mrs %0, tco
        msr tco, #1
        )"
        : "=r"(PrevTCO));
  }

  ~ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        msr tco, %0
        )"
        :
        : "r"(PrevTCO));
  }
};

inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  ExcludeMask |= 1; // Always exclude Tag 0.
  uptr TaggedPtr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
      )"
      : [TaggedPtr] "=r"(TaggedPtr)
      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
  return TaggedPtr;
}

inline uptr addFixedTag(uptr Ptr, uptr Tag) {
  DCHECK_LT(Tag, 16);
  DCHECK_EQ(untagPointer(Ptr), Ptr);
  return Ptr | (Tag << 56);
}

inline uptr storeTags(uptr Begin, uptr End) {
  DCHECK_EQ(0, Begin % 16);
  uptr LineSize, Next, Tmp;
  __asm__ __volatile__(
      R"(
    .arch_extension memtag

    // Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
    // of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
    // indicates that the DC instructions are unavailable.
    DCZID .req %[Tmp]
    mrs DCZID, dczid_el0
    tbnz DCZID, #4, 3f
    and DCZID, DCZID, #15
    mov %[LineSize], #4
    lsl %[LineSize], %[LineSize], DCZID
    .unreq DCZID

    // Our main loop doesn't handle the case where we don't need to perform any
    // DC GZVA operations. If the size of our tagged region is less than
    // twice the cache line size, bail out to the slow path since it's not
    // guaranteed that we'll be able to do a DC GZVA.
    Size .req %[Tmp]
    sub Size, %[End], %[Cur]
    cmp Size, %[LineSize], lsl #1
    b.lt 3f
    .unreq Size

    LineMask .req %[Tmp]
    sub LineMask, %[LineSize], #1

    // STZG until the start of the next cache line.
    orr %[Next], %[Cur], LineMask
  1:
    stzg %[Cur], [%[Cur]], #16
    cmp %[Cur], %[Next]
    b.lt 1b

    // DC GZVA cache lines until we have no more full cache lines.
    bic %[Next], %[End], LineMask
    .unreq LineMask
  2:
    dc gzva, %[Cur]
    add %[Cur], %[Cur], %[LineSize]
    cmp %[Cur], %[Next]
    b.lt 2b

    // STZG until the end of the tagged region. This loop is also used to handle
    // slow path cases.
  3:
    cmp %[Cur], %[End]
    b.ge 4f
    stzg %[Cur], [%[Cur]], #16
    b 3b

  4:
  )"
      : [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next),
        [Tmp] "=&r"(Tmp)
      : [End] "r"(End)
      : "memory");
  DCHECK_EQ(0, Begin % 16);
  return Begin;
}

inline void storeTag(uptr Ptr) {
  DCHECK_EQ(0, Ptr % 16);
  __asm__ __volatile__(R"(
    .arch_extension memtag
    stg %0, [%0]
  )"
                       :
                       : "r"(Ptr)
                       : "memory");
}

inline uptr loadTag(uptr Ptr) {
  DCHECK_EQ(0, Ptr % 16);
  uptr TaggedPtr = Ptr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      ldg %0, [%0]
      )"
      : "+r"(TaggedPtr)
      :
      : "memory");
  return TaggedPtr;
}

#else

inline NORETURN bool systemSupportsMemoryTagging() {
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN void enableSystemMemoryTaggingTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

struct ScopedDisableMemoryTagChecks {
  ScopedDisableMemoryTagChecks() {}
};

inline NORETURN uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  (void)Ptr;
  (void)ExcludeMask;
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN uptr addFixedTag(uptr Ptr, uptr Tag) {
  (void)Ptr;
  (void)Tag;
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN uptr storeTags(uptr Begin, uptr End) {
  (void)Begin;
  (void)End;
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN void storeTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

inline NORETURN uptr loadTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-noreturn"
inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
                         uptr *TaggedBegin, uptr *TaggedEnd) {
  *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
  *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
}
#pragma GCC diagnostic pop

inline void *untagPointer(void *Ptr) {
  return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
}

inline void *loadTag(void *Ptr) {
  return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
}

inline void *addFixedTag(void *Ptr, uptr Tag) {
  return reinterpret_cast<void *>(
      addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
}

template <typename Config>
inline constexpr bool allocatorSupportsMemoryTagging() {
  return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging &&
         (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize();
}

} // namespace scudo

#endif
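The tag layout used by untagPointer/extractTag/addFixedTag above is plain bit arithmetic on bits 56..59 of the pointer. A hedged, host-runnable sketch of the same arithmetic in portable C++ (no MTE hardware needed; the values are illustrative):

// Hedged sketch restating the tag arithmetic from memtag.h above in portable
// C++ so it can run without MTE hardware. Values are illustrative.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t Untagged = 0x00007f00deadbee0ULL;
  const uint64_t Tag = 0xa;
  // addFixedTag: place a 4-bit tag in bits 56..59.
  const uint64_t Tagged = Untagged | (Tag << 56);
  // extractTag: read it back.
  assert(((Tagged >> 56) & 0xf) == Tag);
  // untagPointer: clear the whole top byte (bits 56..63).
  assert((Tagged & ((1ULL << 56) - 1)) == Untagged);
  return 0;
}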
97 Telegram/ThirdParty/scudo/mutex.h vendored
@@ -1,97 +0,0 @@
//===-- mutex.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MUTEX_H_
#define SCUDO_MUTEX_H_

#include "atomic_helpers.h"
#include "common.h"
#include "thread_annotations.h"

#include <string.h>

#if SCUDO_FUCHSIA
#include <lib/sync/mutex.h> // for sync_mutex_t
#endif

namespace scudo {

class CAPABILITY("mutex") HybridMutex {
public:
  bool tryLock() TRY_ACQUIRE(true);
  NOINLINE void lock() ACQUIRE() {
    if (LIKELY(tryLock()))
      return;
    // The compiler may try to fully unroll the loop, ending up in a
    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
    // is large, ugly and unneeded, a compact loop is better for our purpose
    // here. Use a pragma to tell the compiler not to unroll the loop.
#ifdef __clang__
#pragma nounroll
#endif
    for (u8 I = 0U; I < NumberOfTries; I++) {
      delayLoop();
      if (tryLock())
        return;
    }
    lockSlow();
  }
  void unlock() RELEASE();

  // TODO(chiahungduan): In general, we may want to assert the owner of lock as
  // well. Given the current uses of HybridMutex, it's acceptable without
  // asserting the owner. Re-evaluate this when we have certain scenarios that
  // require a more fine-grained lock granularity.
  ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
    if (SCUDO_DEBUG)
      assertHeldImpl();
  }

private:
  void delayLoop() {
    // The value comes from the average time spent in accessing caches (which
    // are the fastest operations) so that we are unlikely to wait too long for
    // fast operations.
    constexpr u32 SpinTimes = 16;
    volatile u32 V = 0;
    for (u32 I = 0; I < SpinTimes; ++I) {
      u32 Tmp = V + 1;
      V = Tmp;
    }
  }

  void assertHeldImpl();

  // TODO(chiahungduan): Adapt this value based on scenarios. E.g., primary and
  // secondary allocator have different allocation times.
  static constexpr u8 NumberOfTries = 32U;

#if SCUDO_LINUX
  atomic_u32 M = {};
#elif SCUDO_FUCHSIA
  sync_mutex_t M = {};
#endif

  void lockSlow() ACQUIRE();
};

class SCOPED_CAPABILITY ScopedLock {
public:
  explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
  ~ScopedLock() RELEASE() { Mutex.unlock(); }

private:
  HybridMutex &Mutex;

  ScopedLock(const ScopedLock &) = delete;
  void operator=(const ScopedLock &) = delete;
};

} // namespace scudo

#endif // SCUDO_MUTEX_H_
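ScopedLock above is a plain RAII guard over HybridMutex. A minimal hedged sketch of the same pattern with standard types, since HybridMutex itself needs the scudo runtime (std::mutex stands in purely for illustration):

// Hedged sketch of the RAII locking pattern that ScopedLock implements above,
// using std::mutex as a stand-in so it compiles outside the scudo tree.
#include <mutex>

static std::mutex CounterMutex;
static unsigned long Counter = 0;

void incrementCounter() {
  // The lock is acquired here and released when Guard leaves scope, even on
  // early return or exception, which is the point of the RAII guard.
  std::lock_guard<std::mutex> Guard(CounterMutex);
  ++Counter;
}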
74 Telegram/ThirdParty/scudo/options.h vendored
@@ -1,74 +0,0 @@
//===-- options.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_OPTIONS_H_
#define SCUDO_OPTIONS_H_

#include "atomic_helpers.h"
#include "common.h"
#include "memtag.h"

namespace scudo {

enum class OptionBit {
  MayReturnNull,
  FillContents0of2,
  FillContents1of2,
  DeallocTypeMismatch,
  DeleteSizeMismatch,
  TrackAllocationStacks,
  UseOddEvenTags,
  UseMemoryTagging,
  AddLargeAllocationSlack,
};

struct Options {
  u32 Val;

  bool get(OptionBit Opt) const { return Val & (1U << static_cast<u32>(Opt)); }

  FillContentsMode getFillContentsMode() const {
    return static_cast<FillContentsMode>(
        (Val >> static_cast<u32>(OptionBit::FillContents0of2)) & 3);
  }
};

template <typename Config> bool useMemoryTagging(const Options &Options) {
  return allocatorSupportsMemoryTagging<Config>() &&
         Options.get(OptionBit::UseMemoryTagging);
}

struct AtomicOptions {
  atomic_u32 Val = {};

  Options load() const { return Options{atomic_load_relaxed(&Val)}; }

  void clear(OptionBit Opt) {
    atomic_fetch_and(&Val, ~(1U << static_cast<u32>(Opt)),
                     memory_order_relaxed);
  }

  void set(OptionBit Opt) {
    atomic_fetch_or(&Val, 1U << static_cast<u32>(Opt), memory_order_relaxed);
  }

  void setFillContentsMode(FillContentsMode FillContents) {
    u32 Opts = atomic_load_relaxed(&Val), NewOpts;
    do {
      NewOpts = Opts;
      NewOpts &= ~(3U << static_cast<u32>(OptionBit::FillContents0of2));
      NewOpts |= static_cast<u32>(FillContents)
                 << static_cast<u32>(OptionBit::FillContents0of2);
    } while (!atomic_compare_exchange_strong(&Val, &Opts, NewOpts,
                                             memory_order_relaxed));
  }
};

} // namespace scudo

#endif // SCUDO_OPTIONS_H_
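FillContents is the one two-bit field in an otherwise one-bit-per-option word, which is why setFillContentsMode needs the mask-and-CAS loop while set/clear get by with a single fetch_or/fetch_and. A hedged sketch of the same packing with std::atomic (the Mode enumerators are invented for illustration):

// Hedged sketch of the two-bit field packing behind setFillContentsMode
// above, using std::atomic; the Mode enum is illustrative, not scudo's.
#include <atomic>
#include <cstdint>

enum Mode : uint32_t { NoFill = 0, ZeroFill = 1, PatternFill = 2 };
constexpr uint32_t FieldShift = 1; // Bits 1..2, mirroring FillContents0of2.

std::atomic<uint32_t> OptionWord{0};

void setMode(Mode M) {
  uint32_t Old = OptionWord.load(std::memory_order_relaxed), New;
  do {
    New = Old & ~(3u << FieldShift);               // Clear both field bits.
    New |= static_cast<uint32_t>(M) << FieldShift; // Write the new value.
  } while (
      !OptionWord.compare_exchange_weak(Old, New, std::memory_order_relaxed));
}

Mode getMode() {
  return static_cast<Mode>(
      (OptionWord.load(std::memory_order_relaxed) >> FieldShift) & 3u);
}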
104 Telegram/ThirdParty/scudo/platform.h vendored
@@ -1,104 +0,0 @@
//===-- platform.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PLATFORM_H_
#define SCUDO_PLATFORM_H_

// Transitive includes of stdint.h specify some of the defines checked below.
#include <stdint.h>

#if defined(__linux__) && !defined(__TRUSTY__)
#define SCUDO_LINUX 1
#else
#define SCUDO_LINUX 0
#endif

// See https://android.googlesource.com/platform/bionic/+/master/docs/defines.md
#if defined(__BIONIC__)
#define SCUDO_ANDROID 1
#else
#define SCUDO_ANDROID 0
#endif

#if defined(__Fuchsia__)
#define SCUDO_FUCHSIA 1
#else
#define SCUDO_FUCHSIA 0
#endif

#if defined(__TRUSTY__)
#define SCUDO_TRUSTY 1
#else
#define SCUDO_TRUSTY 0
#endif

#if defined(__riscv) && (__riscv_xlen == 64)
#define SCUDO_RISCV64 1
#else
#define SCUDO_RISCV64 0
#endif

#if defined(__LP64__)
#define SCUDO_WORDSIZE 64U
#else
#define SCUDO_WORDSIZE 32U
#endif

#if SCUDO_WORDSIZE == 64U
#define FIRST_32_SECOND_64(a, b) (b)
#else
#define FIRST_32_SECOND_64(a, b) (a)
#endif

#ifndef SCUDO_CAN_USE_PRIMARY64
#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
#endif

#ifndef SCUDO_CAN_USE_MTE
#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
#endif

// Use smaller table sizes for fuzzing in order to reduce input size.
// Trusty just has less available memory.
#ifndef SCUDO_SMALL_STACK_DEPOT
#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
#define SCUDO_SMALL_STACK_DEPOT 1
#else
#define SCUDO_SMALL_STACK_DEPOT 0
#endif
#endif

#ifndef SCUDO_ENABLE_HOOKS
#define SCUDO_ENABLE_HOOKS 0
#endif

#ifndef SCUDO_MIN_ALIGNMENT_LOG
// We force malloc-type functions to be aligned to std::max_align_t, but there
// is no reason why the minimum alignment for all other functions can't be 8
// bytes. Except obviously for applications making incorrect assumptions.
// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
#endif

#if defined(__aarch64__)
#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
#else
#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif

// Older gcc versions have issues aligning to a constexpr and require an
// integer. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among
// others.
#if defined(__powerpc__) || defined(__powerpc64__)
#define SCUDO_CACHE_LINE_SIZE 128
#else
#define SCUDO_CACHE_LINE_SIZE 64
#endif

#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)

#endif // SCUDO_PLATFORM_H_
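FIRST_32_SECOND_64 collapses to one of its two arguments at preprocessing time, letting a single definition carry both word sizes. A tiny hedged sketch of how it reads at a use site (the macro below restates the platform.h definition; the constant is illustrative):

// Hedged sketch of a FIRST_32_SECOND_64 use site; the macro restates the
// platform.h definition above, and the constant name is illustrative.
#include <cstdio>

#if defined(__LP64__)
#define FIRST_32_SECOND_64(a, b) (b)
#else
#define FIRST_32_SECOND_64(a, b) (a)
#endif

int main() {
  // Expands at preprocessing time: (1u << 4) on LP64, (1u << 3) on 32-bit.
  const unsigned MinAlignment = 1u << FIRST_32_SECOND_64(3, 4);
  std::printf("minimum alignment: %u bytes\n", MinAlignment);
  return 0;
}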
1170 Telegram/ThirdParty/scudo/primary32.h vendored
File diff suppressed because it is too large
1688 Telegram/ThirdParty/scudo/primary64.h vendored
File diff suppressed because it is too large
309 Telegram/ThirdParty/scudo/quarantine.h vendored
@@ -1,309 +0,0 @@
//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

struct QuarantineBatch {
  // With the following count, a batch (and the header that protects it) occupy
  // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }

  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }

  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }

  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));

    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();

    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }

  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.

// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its Size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};

  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};

// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per put() call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  // This is supposed to be used in test only.
  bool isEmpty() {
    ScopedLock L(CacheMutex);
    return Cache.getSize() == 0U;
  }

  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    bool needRecycle = false;
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
      needRecycle = Cache.getSize() > getMaxSize();
    }

    if (needRecycle && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
    ScopedLock L(CacheMutex);
    // It assumes that the world is stopped, just as the allocator's printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // RecycleMutex must be locked 1st since we grab CacheMutex within recycle.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache GUARDED_BY(CacheMutex);
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};

  void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
      EXCLUDES(CacheMutex) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to
      // save some memory, otherwise batches themselves (since the memory used
      // by them is counted against quarantine limit) can outweigh the actual
      // user's quarantined chunks, which diminishes the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us a merge attempt when the batch list
      // quarantine is unlikely to contain batches suitable for merge.
      constexpr uptr OverheadThresholdPercents = 100;
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};

} // namespace scudo

#endif // SCUDO_QUARANTINE_H_
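The Callback interface documented above (recycle/allocate/deallocate) is everything GlobalQuarantine needs from its host allocator. A hedged, minimal sketch of a conforming callback backed by malloc/free; the real scudo callback routes into the combined allocator instead:

// Hedged sketch of a GlobalQuarantine-style callback; the real one in scudo
// recycles into the allocator itself. malloc/free stand in for illustration.
#include <cstdlib>

struct Node; // Opaque quarantined block type for this sketch.

struct MallocCallback {
  // Called for each quarantined block once it leaves the quarantine.
  void recycle(Node *Ptr) { std::free(Ptr); }
  // Backs the QuarantineBatch bookkeeping allocations.
  void *allocate(unsigned long Size) { return std::malloc(Size); }
  void deallocate(void *Ptr) { std::free(Ptr); }
};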
17 Telegram/ThirdParty/scudo/release.cpp vendored
@@ -1,17 +0,0 @@
//===-- release.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "release.h"

namespace scudo {

BufferPool<RegionPageMap::StaticBufferCount,
           RegionPageMap::StaticBufferNumElements>
    RegionPageMap::Buffers;

} // namespace scudo
701 Telegram/ThirdParty/scudo/release.h vendored
@@ -1,701 +0,0 @@
//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename MemMapT> class RegionReleaseRecorder {
public:
  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases [From, To) range of pages back to OS. Note that `From` and `To`
  // are offset from `Base` + Offset.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  MemMapT *RegionMemMap = nullptr;
  uptr Base = 0;
  // The release offset from Base. This is used when we know a given range
  // after Base will not be released.
  uptr Offset = 0;
};

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
      : Base(Base), Offset(Offset), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases [From, To) range of pages back to OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From + Offset, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  // The starting address to release. Note that we may want to combine (Base +
  // Offset) as a new Base. However, the Base is retrieved from
  // `MapPlatformData` on Fuchsia, which means the offset won't be aware.
  // Therefore, store them separately to make it work on all the platforms.
  uptr Base = 0;
  // The release offset from Base. This is used when we know a given range
  // after Base will not be released.
  uptr Offset = 0;
  MapPlatformData *Data = nullptr;
};

class FragmentationRecorder {
public:
  FragmentationRecorder() = default;

  uptr getReleasedPagesCount() const { return ReleasedPagesCount; }

  void releasePageRangeToOS(uptr From, uptr To) {
    DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
    ReleasedPagesCount += (To - From) / getPageSizeCached();
  }

private:
  uptr ReleasedPagesCount = 0;
};

// A buffer pool which holds a fixed number of static buffers of `uptr`
// elements for fast buffer allocation. If the request size is greater than
// `StaticBufferNumElements` or if all the static buffers are in use, it'll
// delegate the allocation to map().
template <uptr StaticBufferCount, uptr StaticBufferNumElements>
class BufferPool {
public:
  // Preserve 1 bit in the `Mask` so that we don't need to do zero-check while
  // extracting the least significant bit from the `Mask`.
  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
  static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
                          SCUDO_CACHE_LINE_SIZE),
                "");

  struct Buffer {
    // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
    uptr *Data = nullptr;

    // The index of the underlying static buffer, or StaticBufferCount if this
    // buffer was dynamically allocated. This value is initially set to a
    // poison value to aid debugging.
    uptr BufferIndex = ~static_cast<uptr>(0);

    // Only valid if BufferIndex == StaticBufferCount.
    MemMapT MemMap = {};
  };

  // Return a zero-initialized buffer which can contain at least the given
  // number of elements, or nullptr on failure.
  Buffer getBuffer(const uptr NumElements) {
    if (UNLIKELY(NumElements > StaticBufferNumElements))
      return getDynamicBuffer(NumElements);

    uptr index;
    {
      // TODO: In general, we expect this operation should be fast so the
      // waiting thread won't be put into sleep. The HybridMutex does implement
      // the busy-waiting but we may want to review the performance and see if
      // we need an explicit spin lock here.
      ScopedLock L(Mutex);
      index = getLeastSignificantSetBitIndex(Mask);
      if (index < StaticBufferCount)
        Mask ^= static_cast<uptr>(1) << index;
    }

    if (index >= StaticBufferCount)
      return getDynamicBuffer(NumElements);

    Buffer Buf;
    Buf.Data = &RawBuffer[index * StaticBufferNumElements];
    Buf.BufferIndex = index;
    memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
    return Buf;
  }

  void releaseBuffer(Buffer Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    if (Buf.BufferIndex != StaticBufferCount) {
      ScopedLock L(Mutex);
      DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
      Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
    } else {
      Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
    }
  }

  bool isStaticBufferTestOnly(const Buffer &Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    return Buf.BufferIndex != StaticBufferCount;
  }

private:
  Buffer getDynamicBuffer(const uptr NumElements) {
    // When using a heap-based buffer, precommit the pages backing the
    // Vmar by passing |MAP_PRECOMMIT| flag. This allows an optimization
    // where page fault exceptions are skipped as the allocated memory
    // is accessed. So far, this is only enabled on Fuchsia. It hasn't proven a
    // performance benefit on other platforms.
    const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
    const uptr MappedSize =
        roundUp(NumElements * sizeof(uptr), getPageSizeCached());
    Buffer Buf;
    if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
      Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
      Buf.BufferIndex = StaticBufferCount;
    }
    return Buf;
  }

  HybridMutex Mutex;
  // '1' means that buffer index is not used. '0' means the buffer is in use.
  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
  uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
};

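The Mask bookkeeping above is a classic free-slot bitmap: find the lowest set bit to claim a slot, flip it back to release. A hedged standalone sketch of the trick (std::countr_zero stands in for getLeastSignificantSetBitIndex, and the real pool holds a mutex around this; word width illustrative):

// Hedged sketch of the free-slot bitmap behind BufferPool::getBuffer above;
// std::countr_zero (C++20) stands in for getLeastSignificantSetBitIndex.
// Not thread-safe: the real pool does this under its HybridMutex.
#include <bit>
#include <cstdint>

constexpr unsigned SlotCount = 8;
uint64_t Mask = ~UINT64_C(0); // '1' bit = slot free, '0' = slot in use.

int claimSlot() {
  // The high bits stay set forever, so countr_zero never needs a zero-check,
  // mirroring the "preserve 1 bit" static_assert above.
  const int Index = std::countr_zero(Mask);
  if (Index >= static_cast<int>(SlotCount))
    return -1; // All static slots busy; caller falls back to map().
  Mask ^= UINT64_C(1) << Index; // Clear the bit: slot is now in use.
  return Index;
}

void releaseSlot(int Index) {
  Mask |= UINT64_C(1) << Index; // Set the bit: slot is free again.
}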
// A Region page map is used to record the usage of pages in the regions. It
|
|
||||||
// implements a packed array of Counters. Each counter occupies 2^N bits, enough
// to store counter's MaxValue. Ctor will try to use a static buffer first, and
// if that fails (the buffer is too small or already locked), will allocate the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by checking isAllocated() result. For
// performance's sake, none of the accessors check the validity of the
// arguments. It is assumed that Index is always in the [0, N) range and the
// value is not incremented past MaxValue.
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
        PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
        BufferNumElements(0) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }
  ~RegionPageMap() {
    if (!isAllocated())
      return;
    Buffers.releaseBuffer(Buffer);
    Buffer = {};
  }

  // Lock of `StaticBuffer` is acquired conditionally and there's no easy way to
  // specify the thread-safety attribute properly in current code structure.
  // Besides, it's the only place we may want to check thread safety. Therefore,
  // it's fine to bypass the thread-safety analysis now.
  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
    // Rounding counter storage size up to the power of two allows for using
    // bit shifts calculating particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferNumElements = SizePerRegion * Regions;
    Buffer = Buffers.getBuffer(BufferNumElements);
  }

  bool isAllocated() const { return Buffer.Data != nullptr; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
           CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                                   << BitOffset;
  }

  void incN(uptr Region, uptr I, uptr N) const {
    DCHECK_GT(N, 0U);
    DCHECK_LE(N, CounterMask);
    DCHECK_LE(get(Region, I), CounterMask - N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to the max value. Note that the max number of blocks in a
  // page may vary. To provide an easier way to tell if all the blocks are
  // counted for different pages, set to the same max value to denote the
  // all-counted status.
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }
  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      setAsAllCounted(Region, I);
  }

  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
    const uptr Count = get(Region, I);
    if (Count == CounterMask)
      return true;
    if (Count == MaxCount) {
      setAsAllCounted(Region, I);
      return true;
    }
    return false;
  }
  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferNumElements() const { return BufferNumElements; }

private:
  // We may consider making this configurable if there are cases which may
  // benefit from this.
  static const uptr StaticBufferCount = 2U;
  static const uptr StaticBufferNumElements = 512U;
  using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
  static BufferPoolT Buffers;

  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferNumElements;
  BufferPoolT::Buffer Buffer;
};
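// A worked trace of the packing math above, a sketch assuming a 64-bit
// `uptr` (the argument values are illustrative): with MaxValue = 100, the
// most significant set bit index is 6, so CounterSizeBits =
// roundUpPowerOfTwo(7) = 8 and CounterMask = 0xff. PackingRatio = 64 / 8 = 8
// counters per word, so counter I lives in word (I >> 3) at bit offset
// (I & 7) << 3, and 100 counters need roundUp(100, 8) >> 3 = 13 words per
// region. For example:
//
//   RegionPageMap Map(/*NumberOfRegions=*/1, /*CountersPerRegion=*/100,
//                     /*MaxValue=*/100);
//   if (Map.isAllocated()) {
//     Map.incN(0, 9, 3); // Word 1, bits [8, 16) now hold the value 3.
//     DCHECK_EQ(Map.get(0, 9), 3);
//   }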

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
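// An illustrative use of the tracker above (a sketch; `Recorder` is assumed
// to provide releasePageRangeToOS(From, To)): feeding it per-page results
// coalesces adjacent releasable pages into single release calls. The sequence
// {true, true, false, true} produces two calls:
//
//   FreePagesRangeTracker<ReleaseRecorderT> Tracker(Recorder);
//   Tracker.processNextPage(true);  // Opens a range at page 0.
//   Tracker.processNextPage(true);  // Extends it to page 1.
//   Tracker.processNextPage(false); // Closes pages [0, 2): first call.
//   Tracker.processNextPage(true);  // Opens a range at page 3.
//   Tracker.finish();               // Closes pages [3, 4): second call.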

struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
                     uptr ReleaseOffset = 0)
      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
    PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no cross overs.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if (BlockSize % PageSize == 0) {
        // One chunk covers multiple pages, no cross overs.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks are crossing page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }

    // TODO: For multiple regions, it's more complicated to support partial
    // region marking (which includes the complexity of how to handle the last
    // block in a region). We may consider this after markFreeBlocks() accepts
    // only free blocks from the same region.
    if (NumberOfRegions != 1)
      DCHECK_EQ(ReleaseOffset, 0U);

    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
    PageSizeLog = getLog2(PageSize);
    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
  }

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  bool ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return true;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    // TODO: Log some message when we fail on PageMap allocation.
    return PageMap.isAllocated();
  }

  // Mark all the blocks in the given range [From, To). Instead of visiting all
  // the blocks, we will just mark the page as all counted. Note that `From`
  // and `To` have to be page aligned, with one exception: if `To` is equal to
  // the RegionSize, it doesn't need to be page aligned.
  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                             const uptr RegionIndex, const uptr RegionSize) {
    DCHECK_LT(From, To);
    DCHECK_LE(To, Base + RegionSize);
    DCHECK_EQ(From % PageSize, 0U);
    DCHECK_LE(To - From, RegionSize);

    if (!ensurePageMapAllocated())
      return false;

    uptr FromInRegion = From - Base;
    uptr ToInRegion = To - Base;
    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);

    // The straddling block sits across the entire range.
    if (FirstBlockInRange >= ToInRegion)
      return true;

    // The first block may not sit at the first page in the range; move
    // `FromInRegion` to the page of the first block.
    FromInRegion = roundDown(FirstBlockInRange, PageSize);

    // When the first block is not aligned to the range boundary, there is a
    // block sitting across `From`, which looks like,
    //
    //   From                                             To
    //     V                                               V
    //     +-----------------------------------------------+
    //  +-----+-----+-----+-----+
    //  |     |     |     |     | ...
    //  +-----+-----+-----+-----+
    //     |- first page -||- second page -||- ...
    //
    // Therefore, we can't just mark the first page as all counted. Instead, we
    // increment the number of blocks in the first page in the page map and
    // then round up the `From` to the next page.
    if (FirstBlockInRange != FromInRegion) {
      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
      uptr NumBlocksInFirstPage =
          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
          BlockSize;
      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                   NumBlocksInFirstPage);
      FromInRegion = roundUp(FromInRegion + 1, PageSize);
    }

    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);

    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
    // point because it may contain only one block in the range.

    // When the last block sits across `To`, we can't just mark the pages
    // occupied by the last block as all counted. Instead, we increment the
    // counters of those pages by 1. The exception is that if it's the last
    // block in the region, it's fine to mark those pages as all counted.
    if (LastBlockInRange + BlockSize != RegionSize) {
      DCHECK_EQ(ToInRegion % PageSize, 0U);
      // The case below is like,
      //
      //   From                                      To
      //     V                                        V
      //     +----------------------------------------+
      //                          +-----+-----+-----+-----+
      //                          |     |     |     |     | ...
      //                          +-----+-----+-----+-----+
      //                    ... -||- last page -||- next page -|
      //
      // The last block is not aligned to `To`, so we need to increment the
      // counter of `next page` by 1.
      if (LastBlockInRange + BlockSize != ToInRegion) {
        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
                         getPageIndex(LastBlockInRange + BlockSize - 1));
      }
    } else {
      ToInRegion = RegionSize;
    }

    // After handling the first page and the last block, it's safe to mark any
    // page in between the range [From, To).
    if (FromInRegion < ToInRegion) {
      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                   getPageIndex(ToInRegion - 1));
    }

    return true;
  }
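  // A numeric trace of the first-page handling above, a sketch assuming
  // PageSize = 4096 and BlockSize = 3000: for FromInRegion = 4096, the first
  // whole block in range is FirstBlockInRange = roundUpSlow(4096, 3000) =
  // 6000, which is not page aligned. NumBlocksInFirstPage =
  // (4096 + 4096 - 6000 + 2999) / 3000 = 1, so page 1 is incremented by 1
  // (for the block at 6000) and FromInRegion advances to 8192 before the
  // remaining pages are marked as all counted.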

  template <class TransferBatchT, typename DecompactPtrT>
  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                              DecompactPtrT DecompactPtr, const uptr Base,
                              const uptr RegionIndex, const uptr RegionSize,
                              bool MayContainLastBlockInRegion) {
    if (!ensurePageMapAllocated())
      return false;

    if (MayContainLastBlockInRegion) {
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      // The last block in a region may not use the entire page, so we mark the
      // following "pretend" memory block(s) as free in advance.
      //
      //     Region Boundary
      //         v
      //  -----+-----------------------+
      //       |      Last Page        | <- Rounded Region Boundary
      //  -----+-----------------------+
      //   |-----||- trailing blocks -|
      //      ^
      //   last block
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
      // If the difference between `RoundedRegionSize` and
      // `TrailingBlockBase` is larger than a page, that implies the reported
      // `RegionSize` may not be accurate.
      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);

      // Only the last page touched by the last block needs to mark the
      // trailing blocks. Note that if the last "pretend" block straddles the
      // boundary, we still have to count it in so that the logic of counting
      // the number of blocks on a page is consistent.
      uptr NumTrailingBlocks =
          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
           BlockSize - 1) /
          BlockSize;
      if (NumTrailingBlocks > 0) {
        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
                     NumTrailingBlocks);
      }
    }

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          DCHECK_LT(PInRegion, RegionSize);
          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                           getPageIndex(PInRegion + BlockSize - 1));
        }
      }
    }

    return true;
  }
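  // A numeric trace of the trailing-block handling above, a sketch assuming
  // PageSize = 4096, RegionSize = 10000 and BlockSize = 3000: the last real
  // block starts at (10000 / 3000 - 1) * 3000 = 6000, so TrailingBlockBase =
  // 9000 and RoundedRegionSize = 12288. NumTrailingBlocks =
  // (roundUpSlow(3288, 3000) + 2999) / 3000 = 2, i.e. two "pretend" blocks at
  // 9000 and 12000 are counted so that the page holding the region tail can
  // still reach its all-counted value.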

  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
  uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }

  uptr BlockSize;
  uptr NumberOfRegions;
  // For partial region marking, some pages at the front do not need to be
  // counted.
  uptr ReleasePageOffset;
  uptr PageSize;
  uptr PagesCount;
  uptr PageSizeLog;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};
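// A worked example of the block-count classification in the constructor
// above, a sketch assuming PageSize = 4096: BlockSize = 64 divides the page
// evenly, so FullPagesBlockCountMax = 4096 / 64 = 64 and
// SameBlockCountPerPage is true. BlockSize = 48 leaves a remainder of 16 and
// 48 % 16 == 0, so every page holds at most 4096 / 48 + 1 = 86 (partial)
// blocks, still with a uniform count. BlockSize = 96 leaves a remainder of 64
// and 96 % 64 != 0, so pages may hold up to 4096 / 96 + 2 = 44 blocks and the
// slow path below is required.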

// Try to release the pages which don't have any in-use blocks, i.e., pages
// holding only free blocks. The `PageMap` will record the number of free
// blocks in each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = Context.PageSize;
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr ReleasePageOffset = Context.ReleasePageOffset;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, middle portion size (the portion of the page covered by chunks
    // except the first and the last one) and then the last chunk size, adding
    // up the number of chunks on the current page and checking on every step
    // whether the page boundary was crossed.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      if (ReleasePageOffset > 0) {
        PrevPageBoundary = ReleasePageOffset * PageSize;
        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  }
  RangeTracker.finish();
}
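// A short trace of the slow-path boundary walk above, a sketch assuming
// PageSize = 4096 and BlockSize = 3000 (so Pn = 1 and Pnc = 3000): on page 0
// the boundary starts at 0, is not past the previous page boundary, and
// 0 + 3000 < 4096, so BlocksPerPage becomes 2 (the blocks at 0 and 3000). On
// page 1 the boundary is 6000, which lies inside the page and past 4096, so
// BlocksPerPage is again 2 (the blocks at 3000 and 6000), matching the chunks
// that actually touch each page.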

} // namespace scudo

#endif // SCUDO_RELEASE_H_
192 Telegram/ThirdParty/scudo/report.cpp vendored
@@ -1,192 +0,0 @@
//===-- report.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "report.h"

#include "atomic_helpers.h"
#include "string_utils.h"

#include <stdarg.h>

namespace scudo {

class ScopedErrorReport {
public:
  ScopedErrorReport() : Message() { Message.append("Scudo ERROR: "); }
  void append(const char *Format, ...) {
    va_list Args;
    va_start(Args, Format);
    Message.vappend(Format, Args);
    va_end(Args);
  }
  NORETURN ~ScopedErrorReport() { reportRawError(Message.data()); }

private:
  ScopedString Message;
};
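// The class above is an RAII reporter: callers build the message piecewise
// and the NORETURN destructor flushes it and aborts. A minimal usage sketch
// (the argument values are illustrative):
//
//   {
//     ScopedErrorReport Report;
//     Report.append("invalid value for %s option: '%s'\n", "release_to_os",
//                   "oops");
//   } // Destructor runs here: outputs "Scudo ERROR: ..." and dies.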

inline void NORETURN trap() { __builtin_trap(); }

// This could potentially be called recursively if a CHECK fails in the reports.
void NORETURN reportCheckFailed(const char *File, int Line,
                                const char *Condition, u64 Value1, u64 Value2) {
  static atomic_u32 NumberOfCalls;
  if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
    // TODO(kostyak): maybe sleep here?
    trap();
  }
  ScopedErrorReport Report;
  Report.append("CHECK failed @ %s:%d %s ((u64)op1=%llu, (u64)op2=%llu)\n",
                File, Line, Condition, Value1, Value2);
}

// Generic string fatal error message.
void NORETURN reportError(const char *Message) {
  ScopedErrorReport Report;
  Report.append("%s\n", Message);
}

// Generic fatal error message without ScopedString.
void NORETURN reportRawError(const char *Message) {
  outputRaw(Message);
  setAbortMessage(Message);
  die();
}

void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
  ScopedErrorReport Report;
  Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
}

// The checksum of a chunk header is invalid. This could be caused by an
// {over,under}write of the header, or a pointer that is not an actual chunk.
void NORETURN reportHeaderCorruption(void *Ptr) {
  ScopedErrorReport Report;
  Report.append("corrupted chunk header at address %p\n", Ptr);
}

// The allocator was compiled with parameters that conflict with field size
// requirements.
void NORETURN reportSanityCheckError(const char *Field) {
  ScopedErrorReport Report;
  Report.append("maximum possible %s doesn't fit in header\n", Field);
}

// We enforce a maximum alignment, to keep fields smaller and generally prevent
// integer overflows, or unexpected corner cases.
void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
  ScopedErrorReport Report;
  Report.append("invalid allocation alignment: %zu exceeds maximum supported "
                "alignment of %zu\n",
                Alignment, MaxAlignment);
}

// See above, we also enforce a maximum size.
void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
                                         uptr MaxSize) {
  ScopedErrorReport Report;
  Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
                "maximum supported size of %zu\n",
                UserSize, TotalSize, MaxSize);
}

void NORETURN reportOutOfBatchClass() {
  ScopedErrorReport Report;
  Report.append("BatchClass region is used up, can't hold any free block\n");
}

void NORETURN reportOutOfMemory(uptr RequestedSize) {
  ScopedErrorReport Report;
  Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
}

static const char *stringifyAction(AllocatorAction Action) {
  switch (Action) {
  case AllocatorAction::Recycling:
    return "recycling";
  case AllocatorAction::Deallocating:
    return "deallocating";
  case AllocatorAction::Reallocating:
    return "reallocating";
  case AllocatorAction::Sizing:
    return "sizing";
  }
  return "<invalid action>";
}

// The chunk is not in a state congruent with the operation we want to perform.
// This is usually the case with a double-free or a realloc of a freed pointer.
void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
  ScopedErrorReport Report;
  Report.append("invalid chunk state when %s address %p\n",
                stringifyAction(Action), Ptr);
}

void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
  ScopedErrorReport Report;
  Report.append("misaligned pointer when %s address %p\n",
                stringifyAction(Action), Ptr);
}

// The deallocation function used is at odds with the one used to allocate the
// chunk (eg: new[]/delete or malloc/delete, and so on).
void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
                                        u8 TypeA, u8 TypeB) {
  ScopedErrorReport Report;
  Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
                stringifyAction(Action), Ptr, TypeA, TypeB);
}

// The size specified to the delete operator does not match the one that was
// passed to new when allocating the chunk.
void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
                                       uptr ExpectedSize) {
  ScopedErrorReport Report;
  Report.append(
      "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
      Size, ExpectedSize);
}

void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
  ScopedErrorReport Report;
  Report.append(
      "invalid allocation alignment: %zu, alignment must be a power of two\n",
      Alignment);
}

void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
  ScopedErrorReport Report;
  Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
                "be represented with type size_t\n",
                Count, Size);
}

void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
  ScopedErrorReport Report;
  Report.append(
      "invalid alignment requested in posix_memalign: %zu, alignment must be a "
      "power of two and a multiple of sizeof(void *) == %zu\n",
      Alignment, sizeof(void *));
}

void NORETURN reportPvallocOverflow(uptr Size) {
  ScopedErrorReport Report;
  Report.append("pvalloc parameters overflow: size %zu rounded up to system "
                "page size %zu cannot be represented in type size_t\n",
                Size, getPageSizeCached());
}

void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
  ScopedErrorReport Report;
  Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
                "must be a power of two and the requested size %zu must be a "
                "multiple of alignment\n",
                Alignment, Size);
}

} // namespace scudo
60 Telegram/ThirdParty/scudo/report.h vendored
@@ -1,60 +0,0 @@
//===-- report.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_REPORT_H_
#define SCUDO_REPORT_H_

#include "internal_defs.h"

namespace scudo {

// Reports are *fatal* unless stated otherwise.

// Generic error, adds newline to end of message.
void NORETURN reportError(const char *Message);

// Generic error, but the message is not modified.
void NORETURN reportRawError(const char *Message);

// Flags related errors.
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);

// Chunk header related errors.
void NORETURN reportHeaderCorruption(void *Ptr);

// Sanity checks related error.
void NORETURN reportSanityCheckError(const char *Field);

// Combined allocator errors.
void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
                                         uptr MaxSize);
void NORETURN reportOutOfBatchClass();
void NORETURN reportOutOfMemory(uptr RequestedSize);
enum class AllocatorAction : u8 {
  Recycling,
  Deallocating,
  Reallocating,
  Sizing,
};
void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
                                        u8 TypeA, u8 TypeB);
void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);

// C wrappers errors.
void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
void NORETURN reportCallocOverflow(uptr Count, uptr Size);
void NORETURN reportPvallocOverflow(uptr Size);
void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment);

} // namespace scudo

#endif // SCUDO_REPORT_H_
58 Telegram/ThirdParty/scudo/report_linux.cpp vendored
@@ -1,58 +0,0 @@
//===-- report_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_LINUX || SCUDO_TRUSTY

#include "common.h"
#include "internal_defs.h"
#include "report.h"
#include "report_linux.h"
#include "string_utils.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>

namespace scudo {

// Fatal internal map() error (potentially OOM related).
void NORETURN reportMapError(uptr SizeIfOOM) {
  char Error[128] = "Scudo ERROR: internal map failure\n";
  if (SizeIfOOM) {
    formatString(
        Error, sizeof(Error),
        "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
        SizeIfOOM >> 10);
  }
  reportRawError(Error);
}

void NORETURN reportUnmapError(uptr Addr, uptr Size) {
  char Error[128];
  formatString(Error, sizeof(Error),
               "Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
               "Size %zu\n",
               strerror(errno), Addr, Size);
  reportRawError(Error);
}

void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
  char Error[128];
  formatString(
      Error, sizeof(Error),
      "Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
      "Size %zu Prot %x\n",
      strerror(errno), Addr, Size, Prot);
  reportRawError(Error);
}

} // namespace scudo

#endif // SCUDO_LINUX || SCUDO_TRUSTY
34 Telegram/ThirdParty/scudo/report_linux.h vendored
@@ -1,34 +0,0 @@
//===-- report_linux.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_REPORT_LINUX_H_
#define SCUDO_REPORT_LINUX_H_

#include "platform.h"

#if SCUDO_LINUX || SCUDO_TRUSTY

#include "internal_defs.h"

namespace scudo {

// Report a fatal error when a map call fails. SizeIfOOM shall
// hold the requested size on an out-of-memory error, 0 otherwise.
void NORETURN reportMapError(uptr SizeIfOOM = 0);

// Report a fatal error when an unmap call fails.
void NORETURN reportUnmapError(uptr Addr, uptr Size);

// Report a fatal error when an mprotect call fails.
void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot);

} // namespace scudo

#endif // SCUDO_LINUX || SCUDO_TRUSTY

#endif // SCUDO_REPORT_LINUX_H_
708 Telegram/ThirdParty/scudo/secondary.h vendored
@@ -1,708 +0,0 @@
//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock
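// Layout implied by the helpers above: the LargeBlock::Header sits
// immediately before the user pointer, so getHeader() is pointer arithmetic
// plus, on memory tagging builds, re-tagging of the header address. A sketch
// (`Ptr` is assumed to be a pointer returned by this allocator):
//
//   uptr UserPtr = reinterpret_cast<uptr>(Ptr);
//   LargeBlock::Header *H = LargeBlock::getHeader<Config>(UserPtr);
//   // H == UserPtr - getHeaderSize(), modulo the fixed tag when tagging is
//   // enabled.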

static inline void unmap(LargeBlock::Header *H) {
  // Note that the `H->MemMap` is stored on the pages managed by itself. Take
  // over the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {
struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}
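// A numeric sketch of the split remap above (assuming a 4 KiB page size,
// memory tagging in use, and a tightly committed block whose AllocPos lands
// within the first few pages): MaxUnusedCacheBytes = 4 * 4096 = 16 KiB, so
// UntaggedPos = CommitBase + 16 KiB; [CommitBase, UntaggedPos) is remapped
// with MAP_MEMTAG and [UntaggedPos, CommitBase + CommitSize) without it,
// rather than tagging the whole commit range.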

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }
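  // A numeric sketch of the best-fit scan above (values illustrative): for a
  // request where Diff = HeaderPos - CommitBase comes out to 20 KiB against a
  // 120 KiB entry, MaxAllowedFragmentedBytes = (120 KiB - 20 KiB) / 10 =
  // 10 KiB, so Diff exceeds it and the entry is only remembered as the
  // current best fit via MinDiff; an entry whose leftover front portion is
  // within that tenth would be taken immediately, short-circuiting the loop.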
|
|
||||||
bool canCache(uptr Size) {
|
|
||||||
return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
|
|
||||||
Size <= atomic_load_relaxed(&MaxEntrySize);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool setOption(Option O, sptr Value) {
|
|
||||||
if (O == Option::ReleaseInterval) {
|
|
||||||
const s32 Interval = Max(
|
|
||||||
Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
|
|
||||||
CacheConfig::MinReleaseToOsIntervalMs);
|
|
||||||
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
if (O == Option::MaxCacheEntriesCount) {
|
|
||||||
const u32 MaxCount = static_cast<u32>(Value);
|
|
||||||
if (MaxCount > CacheConfig::EntriesArraySize)
|
|
||||||
return false;
|
|
||||||
atomic_store_relaxed(&MaxEntriesCount, MaxCount);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
if (O == Option::MaxCacheEntrySize) {
|
|
||||||
atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
// Not supported by the Secondary Cache, but not an error either.
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
|
|
||||||
|
|
||||||
void disableMemoryTagging() EXCLUDES(Mutex) {
|
|
||||||
ScopedLock L(Mutex);
|
|
||||||
for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
|
|
||||||
if (Quarantine[I].isValid()) {
|
|
||||||
MemMapT &MemMap = Quarantine[I].MemMap;
|
|
||||||
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
|
|
||||||
Quarantine[I].invalidate();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
|
|
||||||
for (u32 I = 0; I < MaxCount; I++) {
|
|
||||||
if (Entries[I].isValid()) {
|
|
||||||
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
|
|
||||||
Entries[I].CommitSize, 0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
QuarantinePos = -1U;
|
|
||||||
}
|
|
||||||
|
|
||||||
void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
|
|
||||||
|
|
||||||
void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
|
|
||||||
|
|
||||||
void unmapTestOnly() { empty(); }
|
|
||||||
|
|
||||||
private:
|
|
||||||
void empty() {
|
|
||||||
MemMapT MapInfo[CacheConfig::EntriesArraySize];
|
|
||||||
uptr N = 0;
|
|
||||||
{
|
|
||||||
ScopedLock L(Mutex);
|
|
||||||
for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
|
|
||||||
if (!Entries[I].isValid())
|
|
||||||
continue;
|
|
||||||
MapInfo[N] = Entries[I].MemMap;
|
|
||||||
Entries[I].invalidate();
|
|
||||||
N++;
|
|
||||||
}
|
|
||||||
EntriesCount = 0;
|
|
||||||
IsFullEvents = 0;
|
|
||||||
}
|
|
||||||
for (uptr I = 0; I < N; I++) {
|
|
||||||
MemMapT &MemMap = MapInfo[I];
|
|
||||||
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
|
|
||||||
if (!Entry.isValid() || !Entry.Time)
|
|
||||||
return;
|
|
||||||
if (Entry.Time > Time) {
|
|
||||||
if (OldestTime == 0 || Entry.Time < OldestTime)
|
|
||||||
OldestTime = Entry.Time;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
|
|
||||||
Entry.Time = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
|
|
||||||
ScopedLock L(Mutex);
|
|
||||||
if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
|
|
||||||
return;
|
|
||||||
OldestTime = 0;
|
|
||||||
for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
|
|
||||||
releaseIfOlderThan(Quarantine[I], Time);
|
|
||||||
for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
|
|
||||||
releaseIfOlderThan(Entries[I], Time);
|
|
||||||
}
|
|
||||||
|
|
||||||
HybridMutex Mutex;
|
|
||||||
u32 EntriesCount GUARDED_BY(Mutex) = 0;
|
|
||||||
u32 QuarantinePos GUARDED_BY(Mutex) = 0;
|
|
||||||
atomic_u32 MaxEntriesCount = {};
|
|
||||||
atomic_uptr MaxEntrySize = {};
|
|
||||||
u64 OldestTime GUARDED_BY(Mutex) = 0;
|
|
||||||
u32 IsFullEvents GUARDED_BY(Mutex) = 0;
|
|
||||||
atomic_s32 ReleaseToOsIntervalMs = {};
|
|
||||||
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
|
|
||||||
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
|
|
||||||
|
|
||||||
CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
|
|
||||||
NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
|
|
||||||
Quarantine GUARDED_BY(Mutex) = {};
|
|
||||||
};
|
|
||||||
|
|
||||||
template <typename Config> class MapAllocator {
|
|
||||||
public:
|
|
||||||
void init(GlobalStats *S,
|
|
||||||
s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
|
|
||||||
DCHECK_EQ(AllocatedBytes, 0U);
|
|
||||||
DCHECK_EQ(FreedBytes, 0U);
|
|
||||||
Cache.init(ReleaseToOsInterval);
|
|
||||||
Stats.init();
|
|
||||||
if (LIKELY(S))
|
|
||||||
S->link(&Stats);
|
|
||||||
}
|
|
||||||
|
|
||||||
void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
|
|
||||||
uptr *BlockEnd = nullptr,
|
|
||||||
FillContentsMode FillContents = NoFill);
|
|
||||||
|
|
||||||
void deallocate(const Options &Options, void *Ptr);
|
|
||||||
|
|
||||||
static uptr getBlockEnd(void *Ptr) {
|
|
||||||
auto *B = LargeBlock::getHeader<Config>(Ptr);
|
|
||||||
return B->CommitBase + B->CommitSize;
|
|
||||||
}
|
|
||||||
|
|
||||||
static uptr getBlockSize(void *Ptr) {
|
|
||||||
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static constexpr uptr getHeadersSize() {
|
|
||||||
return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
|
|
||||||
}
|
|
||||||
|
|
||||||
void disable() NO_THREAD_SAFETY_ANALYSIS {
|
|
||||||
Mutex.lock();
|
|
||||||
Cache.disable();
|
|
||||||
}
|
|
||||||
|
|
||||||
void enable() NO_THREAD_SAFETY_ANALYSIS {
|
|
||||||
Cache.enable();
|
|
||||||
Mutex.unlock();
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename F> void iterateOverBlocks(F Callback) const {
|
|
||||||
Mutex.assertHeld();
|
|
||||||
|
|
||||||
for (const auto &H : InUseBlocks) {
|
|
||||||
uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
|
|
||||||
if (allocatorSupportsMemoryTagging<Config>())
|
|
||||||
Ptr = untagPointer(Ptr);
|
|
||||||
Callback(Ptr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool canCache(uptr Size) { return Cache.canCache(Size); }
|
|
||||||
|
|
||||||
bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
|
|
||||||
|
|
||||||
void releaseToOS() { Cache.releaseToOS(); }
|
|
||||||
|
|
||||||
void disableMemoryTagging() { Cache.disableMemoryTagging(); }
|
|
||||||
|
|
||||||
void unmapTestOnly() { Cache.unmapTestOnly(); }
|
|
||||||
|
|
||||||
void getStats(ScopedString *Str);
|
|
||||||
|
|
||||||
private:
|
|
||||||
typename Config::Secondary::template CacheT<Config> Cache;
|
|
||||||
|
|
||||||
mutable HybridMutex Mutex;
|
|
||||||
DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
|
|
||||||
uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
|
|
||||||
uptr FreedBytes GUARDED_BY(Mutex) = 0;
|
|
||||||
uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
|
|
||||||
uptr LargestSize GUARDED_BY(Mutex) = 0;
|
|
||||||
u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
|
|
||||||
u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
|
|
||||||
LocalStats Stats GUARDED_BY(Mutex);
|
|
||||||
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) to do the
  // cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take the entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
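
// Illustrative sketch (not part of the original file): for a request with
// Alignment <= PageSize, the mapping produced above looks like this, with one
// uncommitted guard page on each side of the committed region:
//
//   MapBase      CommitBase          HeaderPos  AllocPos         MapEnd
//   | guard page | committed ....... | headers  | user data | guard page |
//
// where CommitBase = MapBase + PageSize, CommitSize = MapEnd - PageSize -
// CommitBase, AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment),
// and the returned user pointer is HeaderPos + LargeBlock::getHeaderSize().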

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_

353
Telegram/ThirdParty/scudo/size_class_map.h
vendored

@@ -1,353 +0,0 @@
//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SIZE_CLASS_MAP_H_
#define SCUDO_SIZE_CLASS_MAP_H_

#include "chunk.h"
#include "common.h"
#include "string_utils.h"

namespace scudo {

inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
  const uptr L = getMostSignificantSetBitIndex(Size);
  const uptr LBits = (Size >> (L - LogBits)) - (1 << LogBits);
  const uptr HBits = (L - ZeroLog) << LogBits;
  return LBits + HBits;
}
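
// Illustrative worked example (not part of the original file): with
// ZeroLog = 8 and LogBits = 2, scaledLog2(320, 8, 2) computes
//   L = 8 (most significant set bit of 320 = 0b101000000),
//   LBits = (320 >> 6) - 4 = 5 - 4 = 1,
//   HBits = (8 - 8) << 2 = 0,
// returning 1: sizes in (256, 512] fall into four evenly spaced sub-buckets,
// and 320 lands in the second one (index 1).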

template <typename Config> struct SizeClassMapBase {
  static u16 getMaxCachedHint(uptr Size) {
    DCHECK_NE(Size, 0);
    u32 N;
    // Force a 32-bit division if the template parameters allow for it.
    if (Config::MaxBytesCachedLog > 31 || Config::MaxSizeLog > 31)
      N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size);
    else
      N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size);

    // Note that Config::MaxNumCachedHint is u16 so the result is guaranteed to
    // fit in u16.
    return static_cast<u16>(Max(1U, Min<u32>(Config::MaxNumCachedHint, N)));
  }
};
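
// Illustrative worked example (not part of the original file): with
// MaxBytesCachedLog = 10 and MaxNumCachedHint = 14 (the DefaultSizeClassConfig
// values below), getMaxCachedHint(96) = min(14, 1024 / 96) = 10, i.e. up to
// ten 96-byte chunks may be cached per thread for that class.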

// SizeClassMap maps allocation sizes into size classes and back, in an
// efficient table-free manner.
//
// Class 0 is a special class that doesn't abide by the same rules as other
// classes. The allocator uses it to hold batches.
//
// The other sizes are controlled by the template parameters:
// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
//   2^MidSizeLog bytes.
// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
//   e.g. with NumBits==3 all size classes after 2^MidSizeLog look like
//   0b1xx0..0 (where x is either 0 or 1).
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that can be cached per-thread:
// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
template <typename Config>
class FixedSizeClassMap : public SizeClassMapBase<Config> {
  typedef SizeClassMapBase<Config> Base;

  static const uptr MinSize = 1UL << Config::MinSizeLog;
  static const uptr MidSize = 1UL << Config::MidSizeLog;
  static const uptr MidClass = MidSize / MinSize;
  static const u8 S = Config::NumBits - 1;
  static const uptr M = (1UL << S) - 1;

public:
  static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;

  static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
  static const uptr NumClasses =
      MidClass + ((Config::MaxSizeLog - Config::MidSizeLog) << S) + 1;
  static_assert(NumClasses <= 256, "");
  static const uptr LargestClassId = NumClasses - 1;
  static const uptr BatchClassId = 0;

  static uptr getSizeByClassId(uptr ClassId) {
    DCHECK_NE(ClassId, BatchClassId);
    if (ClassId <= MidClass)
      return (ClassId << Config::MinSizeLog) + Config::SizeDelta;
    ClassId -= MidClass;
    const uptr T = MidSize << (ClassId >> S);
    return T + (T >> S) * (ClassId & M) + Config::SizeDelta;
  }

  static u8 getSizeLSBByClassId(uptr ClassId) {
    return u8(getLeastSignificantSetBitIndex(getSizeByClassId(ClassId)));
  }

  static constexpr bool usesCompressedLSBFormat() { return false; }

  static uptr getClassIdBySize(uptr Size) {
    if (Size <= Config::SizeDelta + (1 << Config::MinSizeLog))
      return 1;
    Size -= Config::SizeDelta;
    DCHECK_LE(Size, MaxSize);
    if (Size <= MidSize)
      return (Size + MinSize - 1) >> Config::MinSizeLog;
    return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
  }

  static u16 getMaxCachedHint(uptr Size) {
    DCHECK_LE(Size, MaxSize);
    return Base::getMaxCachedHint(Size);
  }
};
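
// Illustrative round trip (not part of the original file): with MinSizeLog=5,
// MidSizeLog=8, NumBits=3 and SizeDelta=0 (the DefaultSizeClassConfig below),
// getClassIdBySize(320) = MidClass + 1 + scaledLog2(319, 8, 2) = 8 + 1 + 0 = 9,
// and getSizeByClassId(9) = 256 + (256 >> 2) * 1 = 320, so the mapping is
// exact for that size.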

template <typename Config>
class TableSizeClassMap : public SizeClassMapBase<Config> {
  typedef SizeClassMapBase<Config> Base;

  static const u8 S = Config::NumBits - 1;
  static const uptr M = (1UL << S) - 1;
  static const uptr ClassesSize =
      sizeof(Config::Classes) / sizeof(Config::Classes[0]);

  struct SizeTable {
    constexpr SizeTable() {
      uptr Pos = 1 << Config::MidSizeLog;
      uptr Inc = 1 << (Config::MidSizeLog - S);
      for (uptr i = 0; i != getTableSize(); ++i) {
        Pos += Inc;
        if ((Pos & (Pos - 1)) == 0)
          Inc *= 2;
        Tab[i] = computeClassId(Pos + Config::SizeDelta);
      }
    }

    constexpr static u8 computeClassId(uptr Size) {
      for (uptr i = 0; i != ClassesSize; ++i) {
        if (Size <= Config::Classes[i])
          return static_cast<u8>(i + 1);
      }
      return static_cast<u8>(-1);
    }

    constexpr static uptr getTableSize() {
      return (Config::MaxSizeLog - Config::MidSizeLog) << S;
    }

    u8 Tab[getTableSize()] = {};
  };

  static constexpr SizeTable SzTable = {};

  struct LSBTable {
    constexpr LSBTable() {
      u8 Min = 255, Max = 0;
      for (uptr I = 0; I != ClassesSize; ++I) {
        for (u8 Bit = 0; Bit != 64; ++Bit) {
          if (Config::Classes[I] & (1 << Bit)) {
            Tab[I] = Bit;
            if (Bit < Min)
              Min = Bit;
            if (Bit > Max)
              Max = Bit;
            break;
          }
        }
      }

      if (Max - Min > 3 || ClassesSize > 32)
        return;

      UseCompressedFormat = true;
      CompressedMin = Min;
      for (uptr I = 0; I != ClassesSize; ++I)
        CompressedValue |= u64(Tab[I] - Min) << (I * 2);
    }

    u8 Tab[ClassesSize] = {};

    bool UseCompressedFormat = false;
    u8 CompressedMin = 0;
    u64 CompressedValue = 0;
  };

  static constexpr LSBTable LTable = {};

public:
  static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;

  static const uptr NumClasses = ClassesSize + 1;
  static_assert(NumClasses < 256, "");
  static const uptr LargestClassId = NumClasses - 1;
  static const uptr BatchClassId = 0;
  static const uptr MaxSize = Config::Classes[LargestClassId - 1];

  static uptr getSizeByClassId(uptr ClassId) {
    return Config::Classes[ClassId - 1];
  }

  static u8 getSizeLSBByClassId(uptr ClassId) {
    if (LTable.UseCompressedFormat)
      return ((LTable.CompressedValue >> ((ClassId - 1) * 2)) & 3) +
             LTable.CompressedMin;
    else
      return LTable.Tab[ClassId - 1];
  }

  static constexpr bool usesCompressedLSBFormat() {
    return LTable.UseCompressedFormat;
  }

  static uptr getClassIdBySize(uptr Size) {
    if (Size <= Config::Classes[0])
      return 1;
    Size -= Config::SizeDelta;
    DCHECK_LE(Size, MaxSize);
    if (Size <= (1 << Config::MidSizeLog))
      return ((Size - 1) >> Config::MinSizeLog) + 1;
    return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
  }

  static u16 getMaxCachedHint(uptr Size) {
    DCHECK_LE(Size, MaxSize);
    return Base::getMaxCachedHint(Size);
  }
};
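
// Illustrative decoding sketch (not part of the original file): when every
// class size has its least significant set bit within a 4-bit window and
// there are at most 32 classes, LSBTable packs each LSB index into two bits
// of CompressedValue, relative to CompressedMin. For example, if class 3 has
// an LSB index of CompressedMin + 2, then
//   ((CompressedValue >> ((3 - 1) * 2)) & 3) + CompressedMin
// recovers it, which is exactly what getSizeLSBByClassId() computes above.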

struct DefaultSizeClassConfig {
  static const uptr NumBits = 3;
  static const uptr MinSizeLog = 5;
  static const uptr MidSizeLog = 8;
  static const uptr MaxSizeLog = 17;
  static const u16 MaxNumCachedHint = 14;
  static const uptr MaxBytesCachedLog = 10;
  static const uptr SizeDelta = 0;
};

typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap;
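
// Derived-constants sketch (not part of the original file): plugging the
// DefaultSizeClassConfig values into FixedSizeClassMap gives MidClass =
// 256 / 32 = 8 and NumClasses = 8 + ((17 - 8) << 2) + 1 = 45, with classes
// spaced 32 bytes apart up to 256 bytes and then four sub-buckets per power
// of two up to MaxSize = 128 KiB.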

struct FuchsiaSizeClassConfig {
  static const uptr NumBits = 3;
  static const uptr MinSizeLog = 5;
  static const uptr MidSizeLog = 8;
  static const uptr MaxSizeLog = 17;
  static const u16 MaxNumCachedHint = 12;
  static const uptr MaxBytesCachedLog = 10;
  static const uptr SizeDelta = Chunk::getHeaderSize();
};

typedef FixedSizeClassMap<FuchsiaSizeClassConfig> FuchsiaSizeClassMap;

struct AndroidSizeClassConfig {
#if SCUDO_WORDSIZE == 64U
  static const uptr NumBits = 7;
  static const uptr MinSizeLog = 4;
  static const uptr MidSizeLog = 6;
  static const uptr MaxSizeLog = 16;
  static const u16 MaxNumCachedHint = 13;
  static const uptr MaxBytesCachedLog = 13;

  static constexpr uptr Classes[] = {
      0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
      0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
      0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
      0x02d90, 0x03790, 0x04010, 0x04810, 0x05a10, 0x07310, 0x08210, 0x10010,
  };
  static const uptr SizeDelta = 16;
#else
  static const uptr NumBits = 8;
  static const uptr MinSizeLog = 4;
  static const uptr MidSizeLog = 7;
  static const uptr MaxSizeLog = 16;
  static const u16 MaxNumCachedHint = 14;
  static const uptr MaxBytesCachedLog = 13;

  static constexpr uptr Classes[] = {
      0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
      0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
      0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
      0x00330, 0x00370, 0x003a0, 0x00400, 0x00430, 0x004a0, 0x00530, 0x00610,
      0x00730, 0x00840, 0x00910, 0x009c0, 0x00a60, 0x00b10, 0x00ca0, 0x00e00,
      0x00fb0, 0x01030, 0x01130, 0x011f0, 0x01490, 0x01650, 0x01930, 0x02010,
      0x02190, 0x02490, 0x02850, 0x02d50, 0x03010, 0x03210, 0x03c90, 0x04090,
      0x04510, 0x04810, 0x05c10, 0x06f10, 0x07310, 0x08010, 0x0c010, 0x10010,
  };
  static const uptr SizeDelta = 16;
#endif
};

typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;

#if SCUDO_WORDSIZE == 64U && defined(__clang__)
static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
#endif

struct TrustySizeClassConfig {
  static const uptr NumBits = 1;
  static const uptr MinSizeLog = 5;
  static const uptr MidSizeLog = 5;
  static const uptr MaxSizeLog = 15;
  static const u16 MaxNumCachedHint = 12;
  static const uptr MaxBytesCachedLog = 10;
  static const uptr SizeDelta = 0;
};

typedef FixedSizeClassMap<TrustySizeClassConfig> TrustySizeClassMap;

template <typename SCMap> inline void printMap() {
  ScopedString Buffer;
  uptr PrevS = 0;
  uptr TotalCached = 0;
  for (uptr I = 0; I < SCMap::NumClasses; I++) {
    if (I == SCMap::BatchClassId)
      continue;
    const uptr S = SCMap::getSizeByClassId(I);
    const uptr D = S - PrevS;
    const uptr P = PrevS ? (D * 100 / PrevS) : 0;
    const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
    const uptr Cached = SCMap::getMaxCachedHint(S) * S;
    Buffer.append(
        "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %u %zu; id %zu\n", I,
        S, D, P, L, SCMap::getMaxCachedHint(S), Cached,
        SCMap::getClassIdBySize(S));
    TotalCached += Cached;
    PrevS = S;
  }
  Buffer.append("Total Cached: %zu\n", TotalCached);
  Buffer.output();
}

template <typename SCMap> static UNUSED void validateMap() {
  for (uptr C = 0; C < SCMap::NumClasses; C++) {
    if (C == SCMap::BatchClassId)
      continue;
    const uptr S = SCMap::getSizeByClassId(C);
    CHECK_NE(S, 0U);
    CHECK_EQ(SCMap::getClassIdBySize(S), C);
    if (C < SCMap::LargestClassId)
      CHECK_EQ(SCMap::getClassIdBySize(S + 1), C + 1);
    CHECK_EQ(SCMap::getClassIdBySize(S - 1), C);
    if (C - 1 != SCMap::BatchClassId)
      CHECK_GT(SCMap::getSizeByClassId(C), SCMap::getSizeByClassId(C - 1));
  }
  // Do not perform the loop if the maximum size is too large.
  if (SCMap::MaxSize > (1 << 19))
    return;
  for (uptr S = 1; S <= SCMap::MaxSize; S++) {
    const uptr C = SCMap::getClassIdBySize(S);
    CHECK_LT(C, SCMap::NumClasses);
    CHECK_GE(SCMap::getSizeByClassId(C), S);
    if (C - 1 != SCMap::BatchClassId)
      CHECK_LT(SCMap::getSizeByClassId(C - 1), S);
  }
}

} // namespace scudo

#endif // SCUDO_SIZE_CLASS_MAP_H_

143
Telegram/ThirdParty/scudo/stack_depot.h
vendored

@@ -1,143 +0,0 @@
//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STACK_DEPOT_H_
#define SCUDO_STACK_DEPOT_H_

#include "atomic_helpers.h"
#include "mutex.h"

namespace scudo {

class MurMur2HashBuilder {
  static const u32 M = 0x5bd1e995;
  static const u32 Seed = 0x9747b28c;
  static const u32 R = 24;
  u32 H;

public:
  explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
  void add(u32 K) {
    K *= M;
    K ^= K >> R;
    K *= M;
    H *= M;
    H ^= K;
  }
  u32 get() {
    u32 X = H;
    X ^= X >> 13;
    X *= M;
    X ^= X >> 15;
    return X;
  }
};
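
// Illustrative usage (not part of the original file): hashing a small stack
// trace word by word, as StackDepot::insert() does below. The addresses are
// hypothetical.
//   MurMur2HashBuilder B;
//   B.add(u32(0x1000) >> 2);  // first frame PC, low 32 bits, 4-byte aligned
//   B.add(u32(0x2004) >> 2);  // second frame PC
//   const u32 Hash = B.get();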

class StackDepot {
  HybridMutex RingEndMu;
  u32 RingEnd = 0;

  // This data structure stores a stack trace for each allocation and
  // deallocation when stack trace recording is enabled, which may be looked up
  // using a hash of the stack trace. The lower bits of the hash are an index
  // into the Tab array, which stores an index into the Ring array where the
  // stack traces are stored. As the name implies, Ring is a ring buffer, so a
  // stack trace may wrap around to the start of the array.
  //
  // Each stack trace in Ring is prefixed by a stack trace marker consisting of
  // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
  // and stack trace markers in the case where instruction pointers are 4-byte
  // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
  // size of the stack trace in bits 33-63.
  //
  // The insert() function is potentially racy in its accesses to the Tab and
  // Ring arrays, but find() is resilient to races in the sense that, barring
  // hash collisions, it will either return the correct stack trace or no stack
  // trace at all, even if two instances of insert() raced with one another.
  // This is achieved by re-checking the hash of the stack trace before
  // returning the trace.

#if SCUDO_SMALL_STACK_DEPOT
  static const uptr TabBits = 4;
#else
  static const uptr TabBits = 16;
#endif
  static const uptr TabSize = 1 << TabBits;
  static const uptr TabMask = TabSize - 1;
  atomic_u32 Tab[TabSize] = {};

#if SCUDO_SMALL_STACK_DEPOT
  static const uptr RingBits = 4;
#else
  static const uptr RingBits = 19;
#endif
  static const uptr RingSize = 1 << RingBits;
  static const uptr RingMask = RingSize - 1;
  atomic_u64 Ring[RingSize] = {};

public:
  // Insert hash of the stack trace [Begin, End) into the stack depot, and
  // return the hash.
  u32 insert(uptr *Begin, uptr *End) {
    MurMur2HashBuilder B;
    for (uptr *I = Begin; I != End; ++I)
      B.add(u32(*I) >> 2);
    u32 Hash = B.get();

    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
    if (Entry == Id)
      return Hash;

    ScopedLock Lock(RingEndMu);
    RingPos = RingEnd;
    atomic_store_relaxed(&Tab[Pos], RingPos);
    atomic_store_relaxed(&Ring[RingPos], Id);
    for (uptr *I = Begin; I != End; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      atomic_store_relaxed(&Ring[RingPos], *I);
    }
    RingEnd = (RingPos + 1) & RingMask;
    return Hash;
  }

  // Look up a stack trace by hash. Returns true if successful. The trace may be
  // accessed via operator[] passing indexes between *RingPosPtr and
  // *RingPosPtr + *SizePtr.
  bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    if (RingPos >= RingSize)
      return false;
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 HashWithTagBit = (u64(Hash) << 1) | 1;
    if ((Entry & 0x1ffffffff) != HashWithTagBit)
      return false;
    u32 Size = u32(Entry >> 33);
    if (Size >= RingSize)
      return false;
    *RingPosPtr = (RingPos + 1) & RingMask;
    *SizePtr = Size;
    MurMur2HashBuilder B;
    for (uptr I = 0; I != Size; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
    }
    return B.get() == Hash;
  }

  u64 operator[](uptr RingPos) const {
    return atomic_load_relaxed(&Ring[RingPos & RingMask]);
  }
};
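
// Illustrative decoding of a stack trace marker (not part of the original
// file): a marker Entry = (u64(Size) << 33) | (u64(Hash) << 1) | 1 splits
// back into its fields as
//   bool IsMarker = Entry & 1;                  // always 1 for markers
//   u32 Hash = u32((Entry >> 1) & 0xffffffff);  // bits 1-32
//   u32 Size = u32(Entry >> 33);                // bits 33-63
// which is the layout find() checks against before walking the ring.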

} // namespace scudo

#endif // SCUDO_STACK_DEPOT_H_

102
Telegram/ThirdParty/scudo/stats.h
vendored

@@ -1,102 +0,0 @@
//===-- stats.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STATS_H_
#define SCUDO_STATS_H_

#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <string.h>

namespace scudo {

// Memory allocator statistics
enum StatType { StatAllocated, StatFree, StatMapped, StatCount };

typedef uptr StatCounters[StatCount];

// Per-thread stats, live in per-thread cache. We use atomics so that the
// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
// lock, because those are expensive operations, and we only care for the
// stats to be "somewhat" correct: e.g. if we call GlobalStats::get while a
// thread is LocalStats::add'ing, this is OK, we will still get a meaningful
// number.
class LocalStats {
public:
  void init() {
    for (uptr I = 0; I < StatCount; I++)
      DCHECK_EQ(get(static_cast<StatType>(I)), 0U);
  }

  void add(StatType I, uptr V) {
    V += atomic_load_relaxed(&StatsArray[I]);
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void sub(StatType I, uptr V) {
    V = atomic_load_relaxed(&StatsArray[I]) - V;
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }

  uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }

  LocalStats *Next = nullptr;
  LocalStats *Prev = nullptr;

private:
  atomic_uptr StatsArray[StatCount] = {};
};

// Global stats, used for aggregation and querying.
class GlobalStats : public LocalStats {
public:
  void init() { LocalStats::init(); }

  void link(LocalStats *S) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    StatsList.push_back(S);
  }

  void unlink(LocalStats *S) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    StatsList.remove(S);
    for (uptr I = 0; I < StatCount; I++)
      add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
  }

  void get(uptr *S) const EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (uptr I = 0; I < StatCount; I++)
      S[I] = LocalStats::get(static_cast<StatType>(I));
    for (const auto &Stats : StatsList) {
      for (uptr I = 0; I < StatCount; I++)
        S[I] += Stats.get(static_cast<StatType>(I));
    }
    // All stats must be non-negative.
    for (uptr I = 0; I < StatCount; I++)
      S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
  }

  void lock() ACQUIRE(Mutex) { Mutex.lock(); }
  void unlock() RELEASE(Mutex) { Mutex.unlock(); }

  void disable() ACQUIRE(Mutex) { lock(); }
  void enable() RELEASE(Mutex) { unlock(); }

private:
  mutable HybridMutex Mutex;
  DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
};
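
// Illustrative usage sketch (not part of the original file): a thread-local
// cache links its stats into the global aggregate, and readers tolerate
// concurrent relaxed updates.
//   GlobalStats GS;
//   GS.init();
//   LocalStats LS;
//   LS.init();
//   GS.link(&LS);
//   LS.add(StatAllocated, 64);
//   uptr S[StatCount];
//   GS.get(S);  // S[StatAllocated] includes the 64 bytes, modulo races
//   GS.unlink(&LS);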

} // namespace scudo

#endif // SCUDO_STATS_H_

277
Telegram/ThirdParty/scudo/string_utils.cpp
vendored

@@ -1,277 +0,0 @@
//===-- string_utils.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "string_utils.h"
#include "common.h"

#include <stdarg.h>
#include <string.h>

namespace scudo {

static int appendChar(char **Buffer, const char *BufferEnd, char C) {
  if (*Buffer < BufferEnd) {
    **Buffer = C;
    (*Buffer)++;
  }
  return 1;
}

// Appends number in a given Base to buffer. If its length is less than
// |MinNumberLength|, it is padded with leading zeroes or spaces, depending
// on the value of |PadWithZero|.
static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
                        u8 Base, u8 MinNumberLength, bool PadWithZero,
                        bool Negative, bool Upper) {
  constexpr uptr MaxLen = 30;
  RAW_CHECK(Base == 10 || Base == 16);
  RAW_CHECK(Base == 10 || !Negative);
  RAW_CHECK(AbsoluteValue || !Negative);
  RAW_CHECK(MinNumberLength < MaxLen);
  int Res = 0;
  if (Negative && MinNumberLength)
    --MinNumberLength;
  if (Negative && PadWithZero)
    Res += appendChar(Buffer, BufferEnd, '-');
  uptr NumBuffer[MaxLen];
  int Pos = 0;
  do {
    RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
                  "appendNumber buffer overflow");
    NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
    AbsoluteValue /= Base;
  } while (AbsoluteValue > 0);
  if (Pos < MinNumberLength) {
    memset(&NumBuffer[Pos], 0,
           sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
    Pos = MinNumberLength;
  }
  RAW_CHECK(Pos > 0);
  Pos--;
  for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
    char c = (PadWithZero || Pos == 0) ? '0' : ' ';
    Res += appendChar(Buffer, BufferEnd, c);
  }
  if (Negative && !PadWithZero)
    Res += appendChar(Buffer, BufferEnd, '-');
  for (; Pos >= 0; Pos--) {
    char Digit = static_cast<char>(NumBuffer[Pos]);
    Digit = static_cast<char>((Digit < 10) ? '0' + Digit
                                           : (Upper ? 'A' : 'a') + Digit - 10);
    Res += appendChar(Buffer, BufferEnd, Digit);
  }
  return Res;
}
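
// Illustrative worked example (not part of the original file): formatting 255
// in hex with a minimum width of 4 and zero padding. The digit loop fills
// NumBuffer = {15, 15}, the padding step extends it to {15, 15, 0, 0}, the
// leading zeroes are emitted as '0', and the remaining digits as 'f', so the
// buffer receives "00ff". Buf and BufEnd are hypothetical cursor variables.
//   appendNumber(&Buf, BufEnd, /*AbsoluteValue=*/255, /*Base=*/16,
//                /*MinNumberLength=*/4, /*PadWithZero=*/true,
//                /*Negative=*/false, /*Upper=*/false);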

static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
                          u8 Base, u8 MinNumberLength, bool PadWithZero,
                          bool Upper) {
  return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
                      PadWithZero, /*Negative=*/false, Upper);
}

static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
                               u8 MinNumberLength, bool PadWithZero) {
  const bool Negative = (Num < 0);
  const u64 UnsignedNum = (Num == INT64_MIN)
                              ? static_cast<u64>(INT64_MAX) + 1
                              : static_cast<u64>(Negative ? -Num : Num);
  return appendNumber(Buffer, BufferEnd, UnsignedNum, 10, MinNumberLength,
                      PadWithZero, Negative, /*Upper=*/false);
}

// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
// interpret Width == 0 as "no Width requested":
// Width == 0 - no Width requested
// Width < 0 - left-justify S and pad it to -Width chars, if necessary
// Width > 0 - right-justify S, not implemented yet
static int appendString(char **Buffer, const char *BufferEnd, int Width,
                        int MaxChars, const char *S) {
  if (!S)
    S = "<null>";
  int Res = 0;
  for (; *S; S++) {
    if (MaxChars >= 0 && Res >= MaxChars)
      break;
    Res += appendChar(Buffer, BufferEnd, *S);
  }
  // Only the left justified strings are supported.
  while (Width < -Res)
    Res += appendChar(Buffer, BufferEnd, ' ');
  return Res;
}

static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
  int Res = 0;
  Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
  Res += appendUnsigned(Buffer, BufferEnd, ptr_value, 16,
                        SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
                        /*Upper=*/false);
  return Res;
}

static int formatString(char *Buffer, uptr BufferLength, const char *Format,
                        va_list Args) {
  static const char *PrintfFormatsHelp =
      "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
      "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
  RAW_CHECK(Format);
  RAW_CHECK(BufferLength > 0);
  const char *BufferEnd = &Buffer[BufferLength - 1];
  const char *Cur = Format;
  int Res = 0;
  for (; *Cur; Cur++) {
    if (*Cur != '%') {
      Res += appendChar(&Buffer, BufferEnd, *Cur);
      continue;
    }
    Cur++;
    const bool LeftJustified = *Cur == '-';
    if (LeftJustified)
      Cur++;
    bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
    const bool PadWithZero = (*Cur == '0');
    u8 Width = 0;
    if (HaveWidth) {
      while (*Cur >= '0' && *Cur <= '9')
        Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
    }
    const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
    int Precision = -1;
    if (HavePrecision) {
      Cur += 2;
      Precision = va_arg(Args, int);
    }
    const bool HaveZ = (*Cur == 'z');
    Cur += HaveZ;
    const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
    Cur += HaveLL * 2;
    s64 DVal;
    u64 UVal;
    const bool HaveLength = HaveZ || HaveLL;
    const bool HaveFlags = HaveWidth || HaveLength;
    // At the moment only %s supports precision and left-justification.
    CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
    switch (*Cur) {
    case 'd': {
      DVal = HaveLL ? va_arg(Args, s64)
             : HaveZ ? va_arg(Args, sptr)
                     : va_arg(Args, int);
      Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
      break;
    }
    case 'u':
    case 'x':
    case 'X': {
      UVal = HaveLL ? va_arg(Args, u64)
             : HaveZ ? va_arg(Args, uptr)
                     : va_arg(Args, unsigned);
      const bool Upper = (*Cur == 'X');
      Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
                            Width, PadWithZero, Upper);
      break;
    }
    case 'p': {
      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
      Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
      break;
    }
    case 's': {
      RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
      // Only left-justified Width is supported.
      CHECK(!HaveWidth || LeftJustified);
      Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
                          Precision, va_arg(Args, char *));
      break;
    }
    case 'c': {
      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
      Res +=
          appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
      break;
    }
    // In Scudo, `s64`/`u64` are supposed to use `lld` and `llu` respectively.
    // However, `-Wformat` doesn't know we have a different parser for those
    // placeholders and it keeps complaining about the type mismatch on 64-bit
    // platforms which use `ld`/`lu` for `s64`/`u64`. Therefore, in order to
    // silence the warning, we turn to using `PRId64`/`PRIu64` for printing
    // `s64`/`u64` and handle the `ld`/`lu` here.
    case 'l': {
      ++Cur;
      RAW_CHECK(*Cur == 'd' || *Cur == 'u');

      if (*Cur == 'd') {
        DVal = va_arg(Args, s64);
        Res +=
            appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
      } else {
        UVal = va_arg(Args, u64);
        Res += appendUnsigned(&Buffer, BufferEnd, UVal, 10, Width, PadWithZero,
                              false);
      }

      break;
    }
    case '%': {
      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
      Res += appendChar(&Buffer, BufferEnd, '%');
      break;
    }
    default: {
      RAW_CHECK_MSG(false, PrintfFormatsHelp);
    }
    }
  }
  RAW_CHECK(Buffer <= BufferEnd);
  appendChar(&Buffer, BufferEnd + 1, '\0');
  return Res;
}

int formatString(char *Buffer, uptr BufferLength, const char *Format, ...) {
  va_list Args;
  va_start(Args, Format);
  int Res = formatString(Buffer, BufferLength, Format, Args);
  va_end(Args);
  return Res;
}
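
// Illustrative usage (not part of the original file): only the formats listed
// in PrintfFormatsHelp are accepted, and %p consumes a uptr argument. Buf and
// Ptr are hypothetical names.
//   char Buf[64];
//   formatString(Buf, sizeof(Buf), "%zu bytes at %p", uptr(42),
//                reinterpret_cast<uptr>(Ptr));
// This writes "42 bytes at 0x..." into Buf and, as with snprintf, returns the
// number of characters the full output needs, even if truncated.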

void ScopedString::vappend(const char *Format, va_list Args) {
  va_list ArgsCopy;
  va_copy(ArgsCopy, Args);
  // formatString doesn't currently support a null buffer or zero buffer
  // length, so in order to get the resulting formatted string length, we use
  // a one-char buffer.
  char C[1];
  const uptr AdditionalLength =
      static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
  const uptr Length = length();
  String.resize(Length + AdditionalLength);
  const uptr FormattedLength = static_cast<uptr>(formatString(
      String.data() + Length, String.size() - Length, Format, ArgsCopy));
  RAW_CHECK(data()[length()] == '\0');
  RAW_CHECK(FormattedLength + 1 == AdditionalLength);
  va_end(ArgsCopy);
}

void ScopedString::append(const char *Format, ...) {
  va_list Args;
  va_start(Args, Format);
  vappend(Format, Args);
  va_end(Args);
}

void Printf(const char *Format, ...) {
  va_list Args;
  va_start(Args, Format);
  ScopedString Msg;
  Msg.vappend(Format, Args);
  outputRaw(Msg.data());
  va_end(Args);
}

} // namespace scudo

43
Telegram/ThirdParty/scudo/string_utils.h
vendored

@@ -1,43 +0,0 @@
//===-- string_utils.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STRING_UTILS_H_
#define SCUDO_STRING_UTILS_H_

#include "internal_defs.h"
#include "vector.h"

#include <stdarg.h>

namespace scudo {

class ScopedString {
public:
  explicit ScopedString() { String.push_back('\0'); }
  uptr length() { return String.size() - 1; }
  const char *data() { return String.data(); }
  void clear() {
    String.clear();
    String.push_back('\0');
  }
  void vappend(const char *Format, va_list Args);
  void append(const char *Format, ...) FORMAT(2, 3);
  void output() const { outputRaw(String.data()); }
  void reserve(size_t Size) { String.reserve(Size + 1); }

private:
  Vector<char> String;
};
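
// Illustrative usage (not part of the original header): the buffer always
// stays NUL-terminated, and append() grows it in place.
//   scudo::ScopedString Str;
//   Str.append("count=%zu", static_cast<scudo::uptr>(3));
//   Str.output();  // prints "count=3"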

int formatString(char *Buffer, uptr BufferLength, const char *Format, ...)
    FORMAT(3, 4);
void Printf(const char *Format, ...) FORMAT(1, 2);

} // namespace scudo

#endif // SCUDO_STRING_UTILS_H_

144
Telegram/ThirdParty/scudo/tests/CMakeLists.txt
vendored

@@ -1,144 +0,0 @@
include_directories(..)

add_custom_target(ScudoUnitTests)
set_target_properties(ScudoUnitTests PROPERTIES
  FOLDER "Compiler-RT Tests")

set(SCUDO_UNITTEST_CFLAGS
  ${COMPILER_RT_UNITTEST_CFLAGS}
  ${COMPILER_RT_GTEST_CFLAGS}
  ${SANITIZER_TEST_CXX_CFLAGS}
  -I${COMPILER_RT_SOURCE_DIR}/include
  -I${COMPILER_RT_SOURCE_DIR}/lib
  -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone
  -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone/include
  -DGTEST_HAS_RTTI=0
  -g
  # Extra flags for the C++ tests
  -Wconversion
  # TODO(kostyak): find a way to make -fsized-deallocation work
  -Wno-mismatched-new-delete)

if(COMPILER_RT_DEBUG)
  list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_DEBUG=1 -DSCUDO_ENABLE_HOOKS=1)
  if (NOT FUCHSIA)
    list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_ENABLE_HOOKS_TESTS=1)
  endif()
endif()

if(ANDROID)
  list(APPEND SCUDO_UNITTEST_CFLAGS -fno-emulated-tls)
endif()

if (COMPILER_RT_HAS_GWP_ASAN)
  list(APPEND SCUDO_UNITTEST_CFLAGS -DGWP_ASAN_HOOKS -fno-omit-frame-pointer
       -mno-omit-leaf-frame-pointer)
endif()

append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
               SCUDO_UNITTEST_CFLAGS)

set(SCUDO_TEST_ARCH ${SCUDO_STANDALONE_SUPPORTED_ARCH})

# gtest requires C++
set(SCUDO_UNITTEST_LINK_FLAGS
  ${COMPILER_RT_UNITTEST_LINK_FLAGS}
  ${COMPILER_RT_UNWINDER_LINK_LIBS}
  ${SANITIZER_TEST_CXX_LIBRARIES})
list(APPEND SCUDO_UNITTEST_LINK_FLAGS -pthread -no-pie)
# Linking against libatomic is required with some compilers
check_library_exists(atomic __atomic_load_8 "" COMPILER_RT_HAS_LIBATOMIC)
if (COMPILER_RT_HAS_LIBATOMIC)
  list(APPEND SCUDO_UNITTEST_LINK_FLAGS -latomic)
endif()

set(SCUDO_TEST_HEADERS
  scudo_unit_test.h
  )
foreach (header ${SCUDO_HEADERS})
  list(APPEND SCUDO_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
endforeach()

macro(add_scudo_unittest testname)
  cmake_parse_arguments(TEST "" "" "SOURCES;ADDITIONAL_RTOBJECTS" ${ARGN})
  if (COMPILER_RT_HAS_GWP_ASAN)
    list(APPEND TEST_ADDITIONAL_RTOBJECTS
         RTGwpAsan RTGwpAsanBacktraceLibc RTGwpAsanSegvHandler)
  endif()

  if(COMPILER_RT_HAS_SCUDO_STANDALONE)
    foreach(arch ${SCUDO_TEST_ARCH})
      # Additional runtime objects get added along RTScudoStandalone
      set(SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
      foreach(rtobject ${TEST_ADDITIONAL_RTOBJECTS})
        list(APPEND SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:${rtobject}.${arch}>)
      endforeach()
      # Add the static runtime library made of all the runtime objects
      set(RUNTIME RT${testname}.${arch})
      add_library(${RUNTIME} STATIC ${SCUDO_TEST_RTOBJECTS})
      set(ScudoUnitTestsObjects)
      generate_compiler_rt_tests(ScudoUnitTestsObjects ScudoUnitTests
        "${testname}-${arch}-Test" ${arch}
        SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
        COMPILE_DEPS ${SCUDO_TEST_HEADERS}
        DEPS llvm_gtest scudo_standalone
        RUNTIME ${RUNTIME}
        CFLAGS ${SCUDO_UNITTEST_CFLAGS}
        LINK_FLAGS ${SCUDO_UNITTEST_LINK_FLAGS})
    endforeach()
  endif()
endmacro()

set(SCUDO_UNIT_TEST_SOURCES
  atomic_test.cpp
  bytemap_test.cpp
  checksum_test.cpp
  chunk_test.cpp
  combined_test.cpp
  common_test.cpp
  condition_variable_test.cpp
  flags_test.cpp
  list_test.cpp
  map_test.cpp
  memtag_test.cpp
  mutex_test.cpp
  primary_test.cpp
  quarantine_test.cpp
  release_test.cpp
  report_test.cpp
  secondary_test.cpp
  size_class_map_test.cpp
  stats_test.cpp
  strings_test.cpp
  timing_test.cpp
  tsd_test.cpp
  vector_test.cpp
  scudo_unit_test_main.cpp
  )

# Temporary hack until LLVM libc supports inttypes.h print format macros
# See: https://github.com/llvm/llvm-project/issues/63317#issuecomment-1591906241
if(LLVM_LIBC_INCLUDE_SCUDO)
  list(REMOVE_ITEM SCUDO_UNIT_TEST_SOURCES timing_test.cpp)
endif()

add_scudo_unittest(ScudoUnitTest
  SOURCES ${SCUDO_UNIT_TEST_SOURCES})

set(SCUDO_C_UNIT_TEST_SOURCES
  wrappers_c_test.cpp
  scudo_unit_test_main.cpp
  )

add_scudo_unittest(ScudoCUnitTest
  SOURCES ${SCUDO_C_UNIT_TEST_SOURCES}
  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers)

set(SCUDO_CXX_UNIT_TEST_SOURCES
  wrappers_cpp_test.cpp
  scudo_unit_test_main.cpp
  )

add_scudo_unittest(ScudoCxxUnitTest
  SOURCES ${SCUDO_CXX_UNIT_TEST_SOURCES}
  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers RTScudoStandaloneCxxWrappers)

101
Telegram/ThirdParty/scudo/tests/atomic_test.cpp
vendored

@@ -1,101 +0,0 @@
//===-- atomic_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "atomic_helpers.h"

namespace scudo {

template <typename T> struct ValAndMagic {
  typename T::Type Magic0;
  T A;
  typename T::Type Magic1;

  static ValAndMagic<T> *Sink;
};

template <typename T> ValAndMagic<T> *ValAndMagic<T>::Sink;

template <typename T, memory_order LoadMO, memory_order StoreMO>
void checkStoreLoad() {
  typedef typename T::Type Type;
  ValAndMagic<T> Val;
  // Prevent the compiler from scalarizing the struct.
  ValAndMagic<T>::Sink = &Val;
  // Ensure that surrounding memory is not overwritten.
  Val.Magic0 = Val.Magic1 = (Type)-3;
  for (u64 I = 0; I < 100; I++) {
    // Generate a value that occupies all bytes of the variable.
    u64 V = I;
    V |= V << 8;
    V |= V << 16;
    V |= V << 32;
    Val.A.ValDoNotUse = (Type)V;
    EXPECT_EQ(atomic_load(&Val.A, LoadMO), (Type)V);
    Val.A.ValDoNotUse = (Type)-1;
    atomic_store(&Val.A, (Type)V, StoreMO);
    EXPECT_EQ(Val.A.ValDoNotUse, (Type)V);
  }
  EXPECT_EQ(Val.Magic0, (Type)-3);
  EXPECT_EQ(Val.Magic1, (Type)-3);
}

TEST(ScudoAtomicTest, AtomicStoreLoad) {
  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u8, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u8, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u8, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u16, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u16, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u16, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u32, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u32, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u32, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u64, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u64, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u64, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_uptr, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_uptr, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_uptr, memory_order_seq_cst, memory_order_seq_cst>();
}

template <typename T> void checkAtomicCompareExchange() {
  typedef typename T::Type Type;
  Type OldVal = 42;
  Type NewVal = 24;
  Type V = OldVal;
  EXPECT_TRUE(atomic_compare_exchange_strong(reinterpret_cast<T *>(&V), &OldVal,
                                             NewVal, memory_order_relaxed));
  EXPECT_FALSE(atomic_compare_exchange_strong(
      reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
  EXPECT_EQ(NewVal, OldVal);
}

TEST(ScudoAtomicTest, AtomicCompareExchangeTest) {
  checkAtomicCompareExchange<atomic_u8>();
  checkAtomicCompareExchange<atomic_u16>();
  checkAtomicCompareExchange<atomic_u32>();
  checkAtomicCompareExchange<atomic_u64>();
  checkAtomicCompareExchange<atomic_uptr>();
}

} // namespace scudo

33
Telegram/ThirdParty/scudo/tests/bytemap_test.cpp
vendored

@@ -1,33 +0,0 @@
//===-- bytemap_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "bytemap.h"

#include <pthread.h>
#include <string.h>

template <typename T> void testMap(T &Map, scudo::uptr Size) {
  Map.init();
  for (scudo::uptr I = 0; I < Size; I += 7)
    Map.set(I, (I % 100) + 1);
  for (scudo::uptr J = 0; J < Size; J++) {
    if (J % 7)
      EXPECT_EQ(Map[J], 0);
    else
      EXPECT_EQ(Map[J], (J % 100) + 1);
  }
}

TEST(ScudoByteMapTest, FlatByteMap) {
  const scudo::uptr Size = 1U << 10;
  scudo::FlatByteMap<Size> Map;
  testMap(Map, Size);
  Map.unmapTestOnly();
}

58
Telegram/ThirdParty/scudo/tests/checksum_test.cpp
vendored

@@ -1,58 +0,0 @@
//===-- checksum_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "checksum.h"

#include <string.h>

static scudo::u16 computeSoftwareChecksum(scudo::u32 Seed, scudo::uptr *Array,
                                          scudo::uptr ArraySize) {
  scudo::u16 Checksum = static_cast<scudo::u16>(Seed & 0xffff);
  for (scudo::uptr I = 0; I < ArraySize; I++)
    Checksum = scudo::computeBSDChecksum(Checksum, Array[I]);
  return Checksum;
}

static scudo::u16 computeHardwareChecksum(scudo::u32 Seed, scudo::uptr *Array,
                                          scudo::uptr ArraySize) {
  scudo::u32 Crc = Seed;
  for (scudo::uptr I = 0; I < ArraySize; I++)
    Crc = scudo::computeHardwareCRC32(Crc, Array[I]);
  return static_cast<scudo::u16>((Crc & 0xffff) ^ (Crc >> 16));
}

typedef scudo::u16 (*ComputeChecksum)(scudo::u32, scudo::uptr *, scudo::uptr);

// This verifies that flipping bits in the data being checksummed produces a
// different checksum. We do not use random data to avoid flakiness.
template <ComputeChecksum F> static void verifyChecksumFunctionBitFlip() {
  scudo::uptr Array[sizeof(scudo::u64) / sizeof(scudo::uptr)];
  const scudo::uptr ArraySize = ARRAY_SIZE(Array);
  memset(Array, 0xaa, sizeof(Array));
  const scudo::u32 Seed = 0x41424343U;
  const scudo::u16 Reference = F(Seed, Array, ArraySize);
  scudo::u8 IdenticalChecksums = 0;
  for (scudo::uptr I = 0; I < ArraySize; I++) {
    for (scudo::uptr J = 0; J < SCUDO_WORDSIZE; J++) {
      Array[I] ^= scudo::uptr{1} << J;
      if (F(Seed, Array, ArraySize) == Reference)
        IdenticalChecksums++;
      Array[I] ^= scudo::uptr{1} << J;
    }
  }
  // Allow for a couple of identical checksums over the whole set of flips.
  EXPECT_LE(IdenticalChecksums, 2);
}

TEST(ScudoChecksumTest, ChecksumFunctions) {
  verifyChecksumFunctionBitFlip<computeSoftwareChecksum>();
  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32())
    verifyChecksumFunctionBitFlip<computeHardwareChecksum>();
}
|
|
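
The software path folds each machine word into a 16-bit accumulator with the classic BSD checksum recurrence: rotate the accumulator right by one bit, then add the next byte. A standalone sketch of that recurrence over a raw byte buffer (a sketch of the textbook algorithm, not a copy of scudo's computeBSDChecksum):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// BSD checksum: rotate right one bit, add the next byte, keep 16 bits.
static uint16_t bsdChecksum(const uint8_t *Data, size_t Size, uint16_t Seed) {
  uint16_t Checksum = Seed;
  for (size_t I = 0; I < Size; I++) {
    Checksum = static_cast<uint16_t>((Checksum >> 1) | ((Checksum & 1) << 15));
    Checksum = static_cast<uint16_t>(Checksum + Data[I]);
  }
  return Checksum;
}

int main() {
  const uint8_t Data[] = {0xaa, 0xaa, 0xaa, 0xaa};
  printf("checksum: %04x\n", bsdChecksum(Data, sizeof(Data), 0x4343));
  return 0;
}
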
Telegram/ThirdParty/scudo/tests/chunk_test.cpp (vendored, 57 lines removed)
@ -1,57 +0,0 @@
//===-- chunk_test.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "chunk.h"

#include <stdlib.h>

static constexpr scudo::uptr HeaderSize = scudo::Chunk::getHeaderSize();
static constexpr scudo::u32 Cookie = 0x41424344U;
static constexpr scudo::u32 InvalidCookie = 0x11223344U;

static void initChecksum(void) {
  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32())
    scudo::HashAlgorithm = scudo::Checksum::HardwareCRC32;
}

TEST(ScudoChunkDeathTest, ChunkBasic) {
  initChecksum();
  const scudo::uptr Size = 0x100U;
  scudo::Chunk::UnpackedHeader Header = {};
  void *Block = malloc(HeaderSize + Size);
  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
                                     HeaderSize);
  scudo::Chunk::storeHeader(Cookie, P, &Header);
  memset(P, 'A', Size);
  scudo::Chunk::loadHeader(Cookie, P, &Header);
  EXPECT_TRUE(scudo::Chunk::isValid(Cookie, P, &Header));
  EXPECT_FALSE(scudo::Chunk::isValid(InvalidCookie, P, &Header));
  EXPECT_DEATH(scudo::Chunk::loadHeader(InvalidCookie, P, &Header), "");
  free(Block);
}

TEST(ScudoChunkDeathTest, CorruptHeader) {
  initChecksum();
  const scudo::uptr Size = 0x100U;
  scudo::Chunk::UnpackedHeader Header = {};
  void *Block = malloc(HeaderSize + Size);
  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
                                     HeaderSize);
  scudo::Chunk::storeHeader(Cookie, P, &Header);
  memset(P, 'A', Size);
  scudo::Chunk::loadHeader(Cookie, P, &Header);
  // Simulate a couple of corrupted bits per byte of header data.
  for (scudo::uptr I = 0; I < sizeof(scudo::Chunk::PackedHeader); I++) {
    *(reinterpret_cast<scudo::u8 *>(Block) + I) ^= 0x42U;
    EXPECT_DEATH(scudo::Chunk::loadHeader(Cookie, P, &Header), "");
    *(reinterpret_cast<scudo::u8 *>(Block) + I) ^= 0x42U;
  }
  free(Block);
}
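
Both death tests hinge on scudo storing a checksum inside each chunk header, keyed by a per-process cookie; loadHeader recomputes it and aborts on mismatch. A minimal sketch of that validation pattern (illustrative; the field layout and checksum function here are made up, not scudo's):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct HeaderSketch {
  uint32_t State;    // e.g. allocated / quarantined / available
  uint16_t Checksum; // covers the cookie, the address, and the other fields
};

// Hypothetical mixing function; scudo actually uses CRC32 (hardware where
// available) or the BSD checksum, folded down to 16 bits.
static uint16_t checksum(uint32_t Cookie, uintptr_t Ptr, uint32_t State) {
  uint64_t X = Cookie ^ Ptr ^ State;
  return static_cast<uint16_t>(X ^ (X >> 16) ^ (X >> 32) ^ (X >> 48));
}

static void storeHeader(uint32_t Cookie, void *P, HeaderSketch *H) {
  H->Checksum = checksum(Cookie, reinterpret_cast<uintptr_t>(P), H->State);
}

static void loadHeader(uint32_t Cookie, void *P, const HeaderSketch *H) {
  if (H->Checksum !=
      checksum(Cookie, reinterpret_cast<uintptr_t>(P), H->State)) {
    fputs("corrupted chunk header\n", stderr);
    abort(); // this abort is what EXPECT_DEATH asserts on in the tests above
  }
}
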
Telegram/ThirdParty/scudo/tests/combined_test.cpp (vendored, 903 lines removed)
@ -1,903 +0,0 @@
//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "condition_variable.h"
#include "mem_map.h"
#include "size_class_map.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without it we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 'A';
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 'A';
        },
        "");
  }
}
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size);
  void operator delete(void *ptr);
};

constexpr size_t kMaxAlign = std::max({
    alignof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
    alignof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
    alignof(scudo::Allocator<scudo::AndroidConfig>)
});

#if SCUDO_RISCV64
// The allocator is over 4MB in size. Rather than creating an instance of it
// on the heap, keep it in global storage to reduce fragmentation from having
// to mmap it at the start of every test.
struct TestAllocatorStorage {
  static constexpr size_t kMaxSize = std::max({
      sizeof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
      sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
      sizeof(scudo::Allocator<scudo::AndroidConfig>)
  });

  // The thread safety analysis cannot follow the lock being acquired in get()
  // and released in release(), so skip the analysis here.
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    CHECK(size <= kMaxSize &&
          "Allocation size doesn't fit in the allocator storage");
    M.lock();
    return AllocatorStorage;
  }

  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
    M.assertHeld();
    M.unlock();
    ASSERT_EQ(ptr, AllocatorStorage);
  }

  static scudo::HybridMutex M;
  static uint8_t AllocatorStorage[kMaxSize];
};
scudo::HybridMutex TestAllocatorStorage::M;
alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
#else
struct TestAllocatorStorage {
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
    return p;
  }
  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
};
#endif

template <typename Config>
void *TestAllocator<Config>::operator new(size_t size) {
  return TestAllocatorStorage::get(size);
}

template <typename Config>
void TestAllocator<Config>::operator delete(void *ptr) {
  TestAllocatorStorage::release(ptr);
}
template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

namespace scudo {
struct TestConditionVariableConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT =
      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::uptr CompactPtrT;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
    static const bool UseConditionVariable = true;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
} // namespace scudo

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
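
For readability, here is what one SCUDO_TYPED_TEST instantiation expands to, derived mechanically from the three macros above (shown for FIXTURE=ScudoCombinedTest, NAME=IsOwned, TYPE=DefaultConfig):

// SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) { ... } becomes:
template <class TypeParam>
struct ScudoCombinedTestIsOwned : public ScudoCombinedTest<TypeParam> {
  void Run();
};
// ...one alias + TEST_F pair per config, e.g. for DefaultConfig:
using ScudoCombinedTestIsOwned_DefaultConfig =
    ScudoCombinedTestIsOwned<scudo::DefaultConfig>;
TEST_F(ScudoCombinedTestIsOwned_DefaultConfig, IsOwned) {
  ScudoCombinedTestIsOwned<scudo::DefaultConfig>::Run();
}
// ...and the braces following the macro invocation become the body of:
template <class TypeParam> void ScudoCombinedTestIsOwned<TypeParam>::Run()
// { ...test body... }
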
SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (e.g. a power of two plus or minus a small delta).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if ((1LL << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size =
          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }

  Allocator->printStats();
  Allocator->printFragmentationInfo();
}
#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)
SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}
SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 'A';
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize =
        static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}
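
A quick illustration of why ReallocateSame can assert pointer equality: all requested sizes that round up into the same size class can be served by the same block, so realloc is done in place. A sketch with a hypothetical size-class table (the 1024 here is made up; scudo's real classes come from its SizeClassMap):

#include <cassert>
#include <cstddef>

// Hypothetical size-class table, smallest class that fits wins.
static const size_t Classes[] = {32, 64, 128, 256, 512, 1024};

static size_t classFor(size_t Size) {
  for (size_t C : Classes)
    if (Size <= C)
      return C;
  return 0; // would be served by the secondary allocator instead
}

int main() {
  // Mirrors ReallocSize = MaxSize - 64 in the test: every Delta in [-32, 32)
  // keeps the request inside the same class, so the block can be reused.
  const size_t ReallocSize = 1024 - 64;
  for (long Delta = -32; Delta < 32; Delta += 8)
    assert(classFor(static_cast<size_t>(static_cast<long>(ReallocSize) +
                                        Delta)) == classFor(ReallocSize));
  return 0;
}
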
SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (e.g. it won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. Internals of
  // iterateOverChunks read the header via tagged and non-tagged pointers, so
  // one of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          static_cast<scudo::uptr>(std::rand()) %
              (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
          Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 'A';
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 'A';
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_TRUE(!TSD->getCache().isEmpty());
  TSD->getCache().drain();
  EXPECT_TRUE(TSD->getCache().isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}
SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  // `ForceAll` will also drain the caches.
  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_TRUE(TSD->getCache().isEmpty());
  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}
// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
  ASSERT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  ReservedMemory.release();
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;

  struct Primary {
    // Tiny allocator whose Primary only serves chunks of four sizes.
    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 18;
  };
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };

  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}
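
To make the FullRegion arithmetic concrete: assuming DeathSizeClassConfig's four classes are the powers of two from 1 << 10 to 1 << 13 (which matches the "four sizes" comment above; the exact values depend on FixedSizeClassMap), the chunk counts the test drives through each 2 MiB region work out as follows:

#include <cstdio>

int main() {
  // DeathRegionSizeLog == 21, so each primary region is 2 MiB.
  const unsigned long RegionSize = 1UL << 21;
  const unsigned long Sizes[] = {1024, 2048, 4096, 8192}; // assumed classes
  const int LargestClassId = 4;
  for (int ClassId = 1; ClassId <= LargestClassId; ClassId++) {
    const unsigned long Size = Sizes[ClassId - 1];
    // Mirrors MaxNumberOfChunks in FullRegion: enough chunks to fill this
    // region and every larger one, forcing spill into bigger size classes.
    const unsigned long MaxNumberOfChunks =
        (RegionSize / Size) * (LargestClassId - ClassId + 1);
    printf("class %d: up to %lu chunks of %lu bytes\n", ClassId,
           MaxNumberOfChunks, Size);
  }
  return 0;
}
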
// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();
  Allocator->setOption(scudo::Option::MemtagTuning,
                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}
SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr size_t nPtrs = 10000;
  for (scudo::uptr i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (size_t i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (size_t i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  auto Size = Allocator->getRingBufferSize();
  if (Size > 0)
    EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  auto *Addr = Allocator->getRingBufferAddress();
  EXPECT_NE(Addr, nullptr);
  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// which covers only simple operations and ensures the configuration is able
// to compile.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    free(p);
  }

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  TSD->getCache().drain();

  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
#endif
Telegram/ThirdParty/scudo/tests/common_test.cpp (vendored, 75 lines removed)
@ -1,75 +0,0 @@
//===-- common_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "internal_defs.h"
#include "tests/scudo_unit_test.h"

#include "common.h"
#include "mem_map.h"
#include <algorithm>
#include <fstream>

namespace scudo {

static uptr getResidentMemorySize() {
  if (!SCUDO_LINUX)
    UNREACHABLE("Not implemented!");
  uptr Size;
  uptr Resident;
  std::ifstream IFS("/proc/self/statm");
  IFS >> Size;
  IFS >> Resident;
  return Resident * getPageSizeCached();
}

// Fuchsia needs a getResidentMemorySize implementation.
TEST(ScudoCommonTest, SKIP_ON_FUCHSIA(ResidentMemorySize)) {
  uptr OnStart = getResidentMemorySize();
  EXPECT_GT(OnStart, 0UL);

  const uptr Size = 1ull << 30;
  const uptr Threshold = Size >> 3;

  MemMapT MemMap;
  ASSERT_TRUE(MemMap.map(/*Addr=*/0U, Size, "ResidentMemorySize"));
  ASSERT_NE(MemMap.getBase(), 0U);
  void *P = reinterpret_cast<void *>(MemMap.getBase());
  EXPECT_LT(getResidentMemorySize(), OnStart + Threshold);

  memset(P, 1, Size);
  EXPECT_GT(getResidentMemorySize(), OnStart + Size - Threshold);

  MemMap.releasePagesToOS(MemMap.getBase(), Size);
  EXPECT_LT(getResidentMemorySize(), OnStart + Threshold);

  memset(P, 1, Size);
  EXPECT_GT(getResidentMemorySize(), OnStart + Size - Threshold);

  MemMap.unmap(MemMap.getBase(), Size);
}

TEST(ScudoCommonTest, Zeros) {
  const uptr Size = 1ull << 20;

  MemMapT MemMap;
  ASSERT_TRUE(MemMap.map(/*Addr=*/0U, Size, "Zeros"));
  ASSERT_NE(MemMap.getBase(), 0U);
  uptr *P = reinterpret_cast<uptr *>(MemMap.getBase());
  const ptrdiff_t N = Size / sizeof(uptr);
  EXPECT_EQ(std::count(P, P + N, 0), N);

  memset(P, 1, Size);
  EXPECT_EQ(std::count(P, P + N, 0), 0);

  MemMap.releasePagesToOS(MemMap.getBase(), Size);
  EXPECT_EQ(std::count(P, P + N, 0), N);

  MemMap.unmap(MemMap.getBase(), Size);
}

} // namespace scudo
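
The Zeros test depends on a property of releasing anonymous pages back to the OS: on Linux this is typically done with madvise(MADV_DONTNEED), after which the pages read back zero-filled on the next touch. A minimal standalone sketch of that behavior (Linux-specific; scudo wraps the equivalent call behind MemMapT::releasePagesToOS):

#include <cassert>
#include <cstring>
#include <sys/mman.h>

int main() {
  const size_t Size = 1UL << 20;
  void *Raw = mmap(nullptr, Size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(Raw != MAP_FAILED);
  char *P = static_cast<char *>(Raw);
  memset(P, 1, Size);                    // dirty every page
  madvise(P, Size, MADV_DONTNEED);       // release: backing pages are dropped
  assert(P[0] == 0 && P[Size - 1] == 0); // anonymous pages read back as zero
  munmap(P, Size);
  return 0;
}
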
@ -1,59 +0,0 @@
//===-- condition_variable_test.cpp -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "common.h"
#include "condition_variable.h"
#include "mutex.h"

#include <thread>

template <typename ConditionVariableT> void simpleWaitAndNotifyAll() {
  constexpr scudo::u32 NumThreads = 2;
  constexpr scudo::u32 CounterMax = 1024;
  std::thread Threads[NumThreads];

  scudo::HybridMutex M;
  ConditionVariableT CV;
  CV.bindTestOnly(M);
  scudo::u32 Counter = 0;

  for (scudo::u32 I = 0; I < NumThreads; ++I) {
    Threads[I] = std::thread(
        [&](scudo::u32 Id) {
          do {
            scudo::ScopedLock L(M);
            if (Counter % NumThreads != Id && Counter < CounterMax)
              CV.wait(M);
            if (Counter >= CounterMax) {
              break;
            } else {
              ++Counter;
              CV.notifyAll(M);
            }
          } while (true);
        },
        I);
  }

  for (std::thread &T : Threads)
    T.join();

  EXPECT_EQ(Counter, CounterMax);
}

TEST(ScudoConditionVariableTest, DummyCVWaitAndNotifyAll) {
  simpleWaitAndNotifyAll<scudo::ConditionVariableDummy>();
}

#ifdef SCUDO_LINUX
TEST(ScudoConditionVariableTest, LinuxCVWaitAndNotifyAll) {
  simpleWaitAndNotifyAll<scudo::ConditionVariableLinux>();
}
#endif
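
The test above is the classic two-thread ping-pong: each thread may only increment when the counter parity matches its id, and waiters are woken with notifyAll. The same exercise written against the standard library primitives, for comparison (a sketch, not scudo code):

#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::mutex M;
  std::condition_variable CV;
  unsigned Counter = 0;
  const unsigned CounterMax = 1024;

  auto Worker = [&](unsigned Id) {
    for (;;) {
      std::unique_lock<std::mutex> L(M);
      // Wait until it is this thread's turn, or the counter is done.
      CV.wait(L, [&] { return Counter % 2 == Id || Counter >= CounterMax; });
      if (Counter >= CounterMax)
        return;
      ++Counter;
      CV.notify_all();
    }
  };

  std::thread T0(Worker, 0u), T1(Worker, 1u);
  T0.join();
  T1.join();
  assert(Counter == CounterMax);
  return 0;
}
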
Telegram/ThirdParty/scudo/tests/flags_test.cpp (vendored, 134 lines removed)
@ -1,134 +0,0 @@
//===-- flags_test.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "flags.h"
#include "flags_parser.h"

#include <string.h>

static const char FlagName[] = "flag_name";
static const char FlagDesc[] = "flag description";

template <typename T>
static void testFlag(scudo::FlagType Type, T StartValue, const char *Env,
                     T FinalValue) {
  scudo::FlagParser Parser;
  T Flag = StartValue;
  Parser.registerFlag(FlagName, FlagDesc, Type, &Flag);
  Parser.parseString(Env);
  EXPECT_EQ(FinalValue, Flag);
  // Reporting unrecognized flags is needed to reset them.
  scudo::reportUnrecognizedFlags();
}

TEST(ScudoFlagsTest, BooleanFlags) {
  testFlag(scudo::FlagType::FT_bool, false, "flag_name=1", true);
  testFlag(scudo::FlagType::FT_bool, false, "flag_name=yes", true);
  testFlag(scudo::FlagType::FT_bool, false, "flag_name='yes'", true);
  testFlag(scudo::FlagType::FT_bool, false, "flag_name=true", true);
  testFlag(scudo::FlagType::FT_bool, true, "flag_name=0", false);
  testFlag(scudo::FlagType::FT_bool, true, "flag_name=\"0\"", false);
  testFlag(scudo::FlagType::FT_bool, true, "flag_name=no", false);
  testFlag(scudo::FlagType::FT_bool, true, "flag_name=false", false);
  testFlag(scudo::FlagType::FT_bool, true, "flag_name='false'", false);
}

TEST(ScudoFlagsDeathTest, BooleanFlags) {
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name", true),
               "expected '='");
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=", true),
               "invalid value for bool option: ''");
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=2", true),
               "invalid value for bool option: '2'");
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=-1", true),
               "invalid value for bool option: '-1'");
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=on", true),
               "invalid value for bool option: 'on'");
}

TEST(ScudoFlagsTest, IntFlags) {
  testFlag(scudo::FlagType::FT_int, -11, nullptr, -11);
  testFlag(scudo::FlagType::FT_int, -11, "flag_name=0", 0);
  testFlag(scudo::FlagType::FT_int, -11, "flag_name='0'", 0);
  testFlag(scudo::FlagType::FT_int, -11, "flag_name=42", 42);
  testFlag(scudo::FlagType::FT_int, -11, "flag_name=-42", -42);
  testFlag(scudo::FlagType::FT_int, -11, "flag_name=\"-42\"", -42);

  // Unrecognized flags are ignored.
  testFlag(scudo::FlagType::FT_int, -11, "--flag_name=42", -11);
  testFlag(scudo::FlagType::FT_int, -11, "zzzzzzz=42", -11);
}

TEST(ScudoFlagsDeathTest, IntFlags) {
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_int, -11, "flag_name", 0),
               "expected '='");
  EXPECT_DEATH(testFlag(scudo::FlagType::FT_int, -11, "flag_name=42U", 0),
               "invalid value for int option");
}

static void testTwoFlags(const char *Env, bool ExpectedFlag1,
                         const int ExpectedFlag2, const char *Name1 = "flag1",
                         const char *Name2 = "flag2") {
  scudo::FlagParser Parser;
  bool Flag1 = !ExpectedFlag1;
  int Flag2;
  Parser.registerFlag(Name1, FlagDesc, scudo::FlagType::FT_bool, &Flag1);
  Parser.registerFlag(Name2, FlagDesc, scudo::FlagType::FT_int, &Flag2);
  Parser.parseString(Env);
  EXPECT_EQ(ExpectedFlag1, Flag1);
  EXPECT_EQ(Flag2, ExpectedFlag2);
  // Reporting unrecognized flags is needed to reset them.
  scudo::reportUnrecognizedFlags();
}

TEST(ScudoFlagsTest, MultipleFlags) {
  testTwoFlags("flag1=1 flag2=42", true, 42);
  testTwoFlags("flag2=-1 flag1=0", false, -1);
  testTwoFlags("flag1=false:flag2=1337", false, 1337);
  testTwoFlags("flag2=42:flag1=yes", true, 42);
  testTwoFlags("flag2=42\nflag1=yes", true, 42);
  testTwoFlags("flag2=42\r\nflag1=yes", true, 42);
  testTwoFlags("flag2=42\tflag1=yes", true, 42);
}

TEST(ScudoFlagsTest, CommonSuffixFlags) {
  testTwoFlags("flag=1 other_flag=42", true, 42, "flag", "other_flag");
  testTwoFlags("other_flag=42 flag=1", true, 42, "flag", "other_flag");
}

TEST(ScudoFlagsTest, AllocatorFlags) {
  scudo::FlagParser Parser;
  scudo::Flags Flags;
  scudo::registerFlags(&Parser, &Flags);
  Flags.setDefaults();
  Flags.dealloc_type_mismatch = false;
  Flags.delete_size_mismatch = false;
  Flags.quarantine_max_chunk_size = 1024;
  Parser.parseString("dealloc_type_mismatch=true:delete_size_mismatch=true:"
                     "quarantine_max_chunk_size=2048");
  EXPECT_TRUE(Flags.dealloc_type_mismatch);
  EXPECT_TRUE(Flags.delete_size_mismatch);
  EXPECT_EQ(2048, Flags.quarantine_max_chunk_size);
}

#ifdef GWP_ASAN_HOOKS
TEST(ScudoFlagsTest, GWPASanFlags) {
  scudo::FlagParser Parser;
  scudo::Flags Flags;
  scudo::registerFlags(&Parser, &Flags);
  Flags.setDefaults();
  Flags.GWP_ASAN_Enabled = false;
  Parser.parseString("GWP_ASAN_Enabled=true:GWP_ASAN_SampleRate=1:"
                     "GWP_ASAN_InstallSignalHandlers=false");
  EXPECT_TRUE(Flags.GWP_ASAN_Enabled);
  EXPECT_FALSE(Flags.GWP_ASAN_InstallSignalHandlers);
  EXPECT_EQ(1, Flags.GWP_ASAN_SampleRate);
}
#endif // GWP_ASAN_HOOKS
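
MultipleFlags shows that the parser accepts name=value pairs separated by spaces, colons, tabs, and newlines. A minimal sketch of that tokenization (a hypothetical helper handling only unquoted values, not scudo's FlagParser):

#include <cassert>
#include <map>
#include <string>

// Split "name=value" pairs on the separators the tests exercise:
// ' ', ':', '\t', '\n', '\r'.
static std::map<std::string, std::string>
parseFlagString(const std::string &Env) {
  std::map<std::string, std::string> Flags;
  std::string Token;
  auto Flush = [&] {
    const size_t Eq = Token.find('=');
    if (Eq != std::string::npos)
      Flags[Token.substr(0, Eq)] = Token.substr(Eq + 1);
    Token.clear();
  };
  for (char C : Env) {
    if (C == ' ' || C == ':' || C == '\t' || C == '\n' || C == '\r')
      Flush();
    else
      Token.push_back(C);
  }
  Flush();
  return Flags;
}

int main() {
  auto Flags = parseFlagString("flag1=false:flag2=1337");
  assert(Flags["flag1"] == "false" && Flags["flag2"] == "1337");
  return 0;
}
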
Telegram/ThirdParty/scudo/tests/list_test.cpp (vendored, 216 lines removed)
@ -1,216 +0,0 @@
//===-- list_test.cpp -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "list.h"

struct ListItem {
  ListItem *Next;
  ListItem *Prev;
};

static ListItem Items[6];
static ListItem *X = &Items[0];
static ListItem *Y = &Items[1];
static ListItem *Z = &Items[2];
static ListItem *A = &Items[3];
static ListItem *B = &Items[4];
static ListItem *C = &Items[5];

typedef scudo::SinglyLinkedList<ListItem> SLList;
typedef scudo::DoublyLinkedList<ListItem> DLList;

template <typename ListT>
static void setList(ListT *L, ListItem *I1 = nullptr, ListItem *I2 = nullptr,
                    ListItem *I3 = nullptr) {
  L->clear();
  if (I1)
    L->push_back(I1);
  if (I2)
    L->push_back(I2);
  if (I3)
    L->push_back(I3);
}

template <typename ListT>
static void checkList(ListT *L, ListItem *I1, ListItem *I2 = nullptr,
                      ListItem *I3 = nullptr, ListItem *I4 = nullptr,
                      ListItem *I5 = nullptr, ListItem *I6 = nullptr) {
  if (I1) {
    EXPECT_EQ(L->front(), I1);
    L->pop_front();
  }
  if (I2) {
    EXPECT_EQ(L->front(), I2);
    L->pop_front();
  }
  if (I3) {
    EXPECT_EQ(L->front(), I3);
    L->pop_front();
  }
  if (I4) {
    EXPECT_EQ(L->front(), I4);
    L->pop_front();
  }
  if (I5) {
    EXPECT_EQ(L->front(), I5);
    L->pop_front();
  }
  if (I6) {
    EXPECT_EQ(L->front(), I6);
    L->pop_front();
  }
  EXPECT_TRUE(L->empty());
}

template <typename ListT> static void testListCommon(void) {
  ListT L;
  L.clear();

  EXPECT_EQ(L.size(), 0U);
  L.push_back(X);
  EXPECT_EQ(L.size(), 1U);
  EXPECT_EQ(L.back(), X);
  EXPECT_EQ(L.front(), X);
  L.pop_front();
  EXPECT_TRUE(L.empty());
  L.checkConsistency();

  L.push_front(X);
  EXPECT_EQ(L.size(), 1U);
  EXPECT_EQ(L.back(), X);
  EXPECT_EQ(L.front(), X);
  L.pop_front();
  EXPECT_TRUE(L.empty());
  L.checkConsistency();

  L.push_front(X);
  L.push_front(Y);
  L.push_front(Z);
  EXPECT_EQ(L.size(), 3U);
  EXPECT_EQ(L.front(), Z);
  EXPECT_EQ(L.back(), X);
  L.checkConsistency();

  L.pop_front();
  EXPECT_EQ(L.size(), 2U);
  EXPECT_EQ(L.front(), Y);
  EXPECT_EQ(L.back(), X);
  L.pop_front();
  L.pop_front();
  EXPECT_TRUE(L.empty());
  L.checkConsistency();

  L.push_back(X);
  L.push_back(Y);
  L.push_back(Z);
  EXPECT_EQ(L.size(), 3U);
  EXPECT_EQ(L.front(), X);
  EXPECT_EQ(L.back(), Z);
  L.checkConsistency();

  L.pop_front();
  EXPECT_EQ(L.size(), 2U);
  EXPECT_EQ(L.front(), Y);
  EXPECT_EQ(L.back(), Z);
  L.pop_front();
  L.pop_front();
  EXPECT_TRUE(L.empty());
  L.checkConsistency();
}

TEST(ScudoListTest, LinkedListCommon) {
  testListCommon<SLList>();
  testListCommon<DLList>();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(ScudoListTest, SinglyLinkedList) {
|
|
||||||
SLList L;
|
|
||||||
L.clear();
|
|
||||||
|
|
||||||
L.push_back(X);
|
|
||||||
L.push_back(Y);
|
|
||||||
L.push_back(Z);
|
|
||||||
L.extract(X, Y);
|
|
||||||
EXPECT_EQ(L.size(), 2U);
|
|
||||||
EXPECT_EQ(L.front(), X);
|
|
||||||
EXPECT_EQ(L.back(), Z);
|
|
||||||
L.checkConsistency();
|
|
||||||
L.extract(X, Z);
|
|
||||||
EXPECT_EQ(L.size(), 1U);
|
|
||||||
EXPECT_EQ(L.front(), X);
|
|
||||||
EXPECT_EQ(L.back(), X);
|
|
||||||
L.checkConsistency();
|
|
||||||
L.pop_front();
|
|
||||||
EXPECT_TRUE(L.empty());
|
|
||||||
|
|
||||||
SLList L1, L2;
|
|
||||||
L1.clear();
|
|
||||||
L2.clear();
|
|
||||||
|
|
||||||
L1.append_back(&L2);
|
|
||||||
EXPECT_TRUE(L1.empty());
|
|
||||||
EXPECT_TRUE(L2.empty());
|
|
||||||
|
|
||||||
setList(&L1, X);
|
|
||||||
checkList(&L1, X);
|
|
||||||
|
|
||||||
setList(&L1, X, Y);
|
|
||||||
L1.insert(X, Z);
|
|
||||||
checkList(&L1, X, Z, Y);
|
|
||||||
|
|
||||||
setList(&L1, X, Y, Z);
|
|
||||||
setList(&L2, A, B, C);
|
|
||||||
L1.append_back(&L2);
|
|
||||||
checkList(&L1, X, Y, Z, A, B, C);
|
|
||||||
EXPECT_TRUE(L2.empty());
|
|
||||||
|
|
||||||
L1.clear();
|
|
||||||
L2.clear();
|
|
||||||
L1.push_back(X);
|
|
||||||
L1.append_back(&L2);
|
|
||||||
EXPECT_EQ(L1.back(), X);
|
|
||||||
EXPECT_EQ(L1.front(), X);
|
|
||||||
EXPECT_EQ(L1.size(), 1U);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(ScudoListTest, DoublyLinkedList) {
|
|
||||||
DLList L;
|
|
||||||
L.clear();
|
|
||||||
|
|
||||||
L.push_back(X);
|
|
||||||
L.push_back(Y);
|
|
||||||
L.push_back(Z);
|
|
||||||
L.remove(Y);
|
|
||||||
EXPECT_EQ(L.size(), 2U);
|
|
||||||
EXPECT_EQ(L.front(), X);
|
|
||||||
EXPECT_EQ(L.back(), Z);
|
|
||||||
L.checkConsistency();
|
|
||||||
L.remove(Z);
|
|
||||||
EXPECT_EQ(L.size(), 1U);
|
|
||||||
EXPECT_EQ(L.front(), X);
|
|
||||||
EXPECT_EQ(L.back(), X);
|
|
||||||
L.checkConsistency();
|
|
||||||
L.pop_front();
|
|
||||||
EXPECT_TRUE(L.empty());
|
|
||||||
|
|
||||||
L.push_back(X);
|
|
||||||
L.insert(Y, X);
|
|
||||||
EXPECT_EQ(L.size(), 2U);
|
|
||||||
EXPECT_EQ(L.front(), Y);
|
|
||||||
EXPECT_EQ(L.back(), X);
|
|
||||||
L.checkConsistency();
|
|
||||||
L.remove(Y);
|
|
||||||
EXPECT_EQ(L.size(), 1U);
|
|
||||||
EXPECT_EQ(L.front(), X);
|
|
||||||
EXPECT_EQ(L.back(), X);
|
|
||||||
L.checkConsistency();
|
|
||||||
L.pop_front();
|
|
||||||
EXPECT_TRUE(L.empty());
|
|
||||||
}
|
|
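These lists are intrusive: the link pointers (Next, plus Prev for the doubly linked variant) live inside the element itself, so insertion never allocates. A rough sketch of what push_front amounts to for the singly linked case, assuming a bare Head pointer and the ListItem layout above (not Scudo's actual list implementation, which also tracks Size and a Last pointer for O(1) push_back() and size()):

  struct IntrusiveStack {
    ListItem *Head = nullptr;
    void push_front(ListItem *I) {
      I->Next = Head; // The link lives in the element: no node allocation.
      Head = I;
    }
  };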
91 Telegram/ThirdParty/scudo/tests/map_test.cpp vendored
@@ -1,91 +0,0 @@
//===-- map_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "common.h"
#include "mem_map.h"

#include <string.h>
#include <unistd.h>

static const char *MappingName = "scudo:test";

TEST(ScudoMapTest, PageSize) {
  EXPECT_EQ(scudo::getPageSizeCached(),
            static_cast<scudo::uptr>(sysconf(_SC_PAGESIZE)));
}

TEST(ScudoMapDeathTest, MapNoAccessUnmap) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;

  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, MappingName));
  EXPECT_NE(ReservedMemory.getBase(), 0U);
  EXPECT_DEATH(
      memset(reinterpret_cast<void *>(ReservedMemory.getBase()), 0xaa, Size),
      "");

  ReservedMemory.release();
}

TEST(ScudoMapDeathTest, MapUnmap) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  EXPECT_DEATH(
      {
        // Repeat a few times to avoid missing the crash in case the range
        // gets mmapped again by unrelated code.
        for (int i = 0; i < 10; ++i) {
          scudo::MemMapT MemMap;
          MemMap.map(/*Addr=*/0U, Size, MappingName);
          scudo::uptr P = MemMap.getBase();
          if (P == 0U)
            continue;
          MemMap.unmap(MemMap.getBase(), Size);
          memset(reinterpret_cast<void *>(P), 0xbb, Size);
        }
      },
      "");
}

TEST(ScudoMapDeathTest, MapWithGuardUnmap) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  const scudo::uptr Size = 4 * PageSize;
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(
      ReservedMemory.create(/*Addr=*/0U, Size + 2 * PageSize, MappingName));
  ASSERT_NE(ReservedMemory.getBase(), 0U);

  scudo::MemMapT MemMap =
      ReservedMemory.dispatch(ReservedMemory.getBase(), Size + 2 * PageSize);
  ASSERT_TRUE(MemMap.isAllocated());
  scudo::uptr Q = MemMap.getBase() + PageSize;
  ASSERT_TRUE(MemMap.remap(Q, Size, MappingName));
  memset(reinterpret_cast<void *>(Q), 0xaa, Size);
  EXPECT_DEATH(memset(reinterpret_cast<void *>(Q), 0xaa, Size + 1), "");
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

TEST(ScudoMapTest, MapGrowUnmap) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  const scudo::uptr Size = 4 * PageSize;
  scudo::ReservedMemoryT ReservedMemory;
  ReservedMemory.create(/*Addr=*/0U, Size, MappingName);
  ASSERT_TRUE(ReservedMemory.isCreated());

  scudo::MemMapT MemMap =
      ReservedMemory.dispatch(ReservedMemory.getBase(), Size);
  ASSERT_TRUE(MemMap.isAllocated());
  scudo::uptr Q = MemMap.getBase() + PageSize;
  ASSERT_TRUE(MemMap.remap(Q, PageSize, MappingName));
  memset(reinterpret_cast<void *>(Q), 0xaa, PageSize);
  Q += PageSize;
  ASSERT_TRUE(MemMap.remap(Q, PageSize, MappingName));
  memset(reinterpret_cast<void *>(Q), 0xbb, PageSize);
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
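The ReservedMemory/dispatch/remap sequence exercised above is a reserve-then-commit pattern: a range is first set aside with no access rights, then sub-ranges are made usable on demand, leaving inaccessible guard pages around them. On Linux this corresponds roughly to the following POSIX calls; an illustration of the underlying mechanism under a 4 KiB page-size assumption, not Scudo's mem_map code:

  #include <sys/mman.h>

  // Reserve 4 pages with no access rights; touching them faults, which is
  // exactly what the MapNoAccessUnmap death test expects.
  void *Base = mmap(nullptr, 4 * 4096, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  // Commit (remap) one page inside the reservation for read/write use.
  mprotect(Base, 4096, PROT_READ | PROT_WRITE);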
213 Telegram/ThirdParty/scudo/tests/memtag_test.cpp vendored
@@ -1,213 +0,0 @@
//===-- memtag_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "common.h"
#include "mem_map.h"
#include "memtag.h"
#include "platform.h"
#include "tests/scudo_unit_test.h"

#if SCUDO_LINUX
namespace scudo {

TEST(MemtagBasicDeathTest, Unsupported) {
  if (archSupportsMemoryTagging())
    GTEST_SKIP();

  EXPECT_DEATH(archMemoryTagGranuleSize(), "not supported");
  EXPECT_DEATH(untagPointer((uptr)0), "not supported");
  EXPECT_DEATH(extractTag((uptr)0), "not supported");

  EXPECT_DEATH(systemSupportsMemoryTagging(), "not supported");
  EXPECT_DEATH(systemDetectsMemoryTagFaultsTestOnly(), "not supported");
  EXPECT_DEATH(enableSystemMemoryTaggingTestOnly(), "not supported");

  EXPECT_DEATH(selectRandomTag((uptr)0, 0), "not supported");
  EXPECT_DEATH(addFixedTag((uptr)0, 1), "not supported");
  EXPECT_DEATH(storeTags((uptr)0, (uptr)0 + sizeof(0)), "not supported");
  EXPECT_DEATH(storeTag((uptr)0), "not supported");
  EXPECT_DEATH(loadTag((uptr)0), "not supported");

  EXPECT_DEATH(setRandomTag(nullptr, 64, 0, nullptr, nullptr), "not supported");
  EXPECT_DEATH(untagPointer(nullptr), "not supported");
  EXPECT_DEATH(loadTag(nullptr), "not supported");
  EXPECT_DEATH(addFixedTag(nullptr, 0), "not supported");
}

class MemtagTest : public Test {
protected:
  void SetUp() override {
    if (!archSupportsMemoryTagging() || !systemDetectsMemoryTagFaultsTestOnly())
      GTEST_SKIP() << "Memory tagging is not supported";

    BufferSize = getPageSizeCached();
    ASSERT_FALSE(MemMap.isAllocated());
    ASSERT_TRUE(MemMap.map(/*Addr=*/0U, BufferSize, "MemtagTest", MAP_MEMTAG));
    ASSERT_NE(MemMap.getBase(), 0U);
    Addr = MemMap.getBase();
    Buffer = reinterpret_cast<u8 *>(Addr);
    EXPECT_TRUE(isAligned(Addr, archMemoryTagGranuleSize()));
    EXPECT_EQ(Addr, untagPointer(Addr));
  }

  void TearDown() override {
    if (Buffer) {
      ASSERT_TRUE(MemMap.isAllocated());
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  uptr BufferSize = 0;
  scudo::MemMapT MemMap = {};
  u8 *Buffer = nullptr;
  uptr Addr = 0;
};

using MemtagDeathTest = MemtagTest;

TEST_F(MemtagTest, ArchMemoryTagGranuleSize) {
  EXPECT_GT(archMemoryTagGranuleSize(), 1u);
  EXPECT_TRUE(isPowerOfTwo(archMemoryTagGranuleSize()));
}

TEST_F(MemtagTest, ExtractTag) {
// The test is already skipped on anything other than 64 bit. But
// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
#if defined(__LP64__)
  uptr Tags = 0;
  // Try all values for the top byte and check that the tag values are in the
  // expected range.
  for (u64 Top = 0; Top < 0x100; ++Top)
    Tags = Tags | (1u << extractTag(Addr | (Top << 56)));
  EXPECT_EQ(0xffffull, Tags);
#endif
}

TEST_F(MemtagDeathTest, AddFixedTag) {
  for (uptr Tag = 0; Tag < 0x10; ++Tag)
    EXPECT_EQ(Tag, extractTag(addFixedTag(Addr, Tag)));
  if (SCUDO_DEBUG) {
    EXPECT_DEATH(addFixedTag(Addr, 16), "");
    EXPECT_DEATH(addFixedTag(~Addr, 0), "");
  }
}

TEST_F(MemtagTest, UntagPointer) {
  uptr UnTagMask = untagPointer(~uptr(0));
  for (u64 Top = 0; Top < 0x100; ++Top) {
    uptr Ptr = (Addr | (Top << 56)) & UnTagMask;
    EXPECT_EQ(addFixedTag(Ptr, 0), untagPointer(Ptr));
  }
}

TEST_F(MemtagDeathTest, ScopedDisableMemoryTagChecks) {
  u8 *P = reinterpret_cast<u8 *>(addFixedTag(Addr, 1));
  EXPECT_NE(P, Buffer);

  EXPECT_DEATH(*P = 20, "");
  ScopedDisableMemoryTagChecks Disable;
  *P = 10;
}

TEST_F(MemtagTest, SelectRandomTag) {
  for (uptr SrcTag = 0; SrcTag < 0x10; ++SrcTag) {
    uptr Ptr = addFixedTag(Addr, SrcTag);
    uptr Tags = 0;
    for (uptr I = 0; I < 100000; ++I)
      Tags = Tags | (1u << extractTag(selectRandomTag(Ptr, 0)));
    // std::popcnt is C++20
    int PopCnt = 0;
    while (Tags) {
      PopCnt += Tags & 1;
      Tags >>= 1;
    }
    // Random tags are not always very random, and this test is not about PRNG
    // quality. Anything above half would be satisfactory.
    EXPECT_GE(PopCnt, 8);
  }
}

TEST_F(MemtagTest, SelectRandomTagWithMask) {
// The test is already skipped on anything other than 64 bit. But
// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
#if defined(__LP64__)
  for (uptr j = 0; j < 32; ++j) {
    for (uptr i = 0; i < 1000; ++i)
      EXPECT_NE(j, extractTag(selectRandomTag(Addr, 1ull << j)));
  }
#endif
}

TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(LoadStoreTagUnaligned)) {
  for (uptr P = Addr; P < Addr + 4 * archMemoryTagGranuleSize(); ++P) {
    if (P % archMemoryTagGranuleSize() == 0)
      continue;
    EXPECT_DEATH(loadTag(P), "");
    EXPECT_DEATH(storeTag(P), "");
  }
}

TEST_F(MemtagTest, LoadStoreTag) {
  uptr Base = Addr + 0x100;
  uptr Tagged = addFixedTag(Base, 7);
  storeTag(Tagged);

  EXPECT_EQ(Base - archMemoryTagGranuleSize(),
            loadTag(Base - archMemoryTagGranuleSize()));
  EXPECT_EQ(Tagged, loadTag(Base));
  EXPECT_EQ(Base + archMemoryTagGranuleSize(),
            loadTag(Base + archMemoryTagGranuleSize()));
}

TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(StoreTagsUnaligned)) {
  for (uptr P = Addr; P < Addr + 4 * archMemoryTagGranuleSize(); ++P) {
    uptr Tagged = addFixedTag(P, 5);
    if (Tagged % archMemoryTagGranuleSize() == 0)
      continue;
    EXPECT_DEATH(storeTags(Tagged, Tagged), "");
  }
}

TEST_F(MemtagTest, StoreTags) {
// The test is already skipped on anything other than 64 bit. But
// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
#if defined(__LP64__)
  const uptr MaxTaggedSize = 4 * archMemoryTagGranuleSize();
  for (uptr Size = 0; Size <= MaxTaggedSize; ++Size) {
    uptr NoTagBegin = Addr + archMemoryTagGranuleSize();
    uptr NoTagEnd = NoTagBegin + Size;

    u8 Tag = 5;

    uptr TaggedBegin = addFixedTag(NoTagBegin, Tag);
    uptr TaggedEnd = addFixedTag(NoTagEnd, Tag);

    EXPECT_EQ(roundUp(TaggedEnd, archMemoryTagGranuleSize()),
              storeTags(TaggedBegin, TaggedEnd));

    uptr LoadPtr = Addr;
    // Untagged left granule.
    EXPECT_EQ(LoadPtr, loadTag(LoadPtr));

    for (LoadPtr += archMemoryTagGranuleSize(); LoadPtr < NoTagEnd;
         LoadPtr += archMemoryTagGranuleSize()) {
      EXPECT_EQ(addFixedTag(LoadPtr, 5), loadTag(LoadPtr));
    }

    // Untagged right granule.
    EXPECT_EQ(LoadPtr, loadTag(LoadPtr));

    // Reset tags without using StoreTags.
    MemMap.releasePagesToOS(Addr, BufferSize);
  }
#endif
}

} // namespace scudo

#endif
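On AArch64 MTE, the tag is a 4-bit value kept in the top byte of the pointer, which is why the tests shift Top by 56 and expect exactly 16 distinct tags (0xffff as a bitmask). The tag helpers reduce to bit manipulation along these lines; a sketch only, assuming the tag sits in bits 56-59, while the real implementations also assert architecture support and alignment:

  #include <cstdint>

  constexpr std::uint64_t extractTagSketch(std::uint64_t Ptr) {
    return (Ptr >> 56) & 0xF; // Tag lives in bits 56-59.
  }
  constexpr std::uint64_t untagPointerSketch(std::uint64_t Ptr) {
    return Ptr & ~(0xFULL << 56); // Clear the tag bits.
  }
  constexpr std::uint64_t addFixedTagSketch(std::uint64_t Ptr, std::uint64_t Tag) {
    return untagPointerSketch(Ptr) | (Tag << 56);
  }
  static_assert(extractTagSketch(addFixedTagSketch(0x1000, 7)) == 7,
                "round-trips a 4-bit tag through the top byte");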
108 Telegram/ThirdParty/scudo/tests/mutex_test.cpp vendored
@@ -1,108 +0,0 @@
//===-- mutex_test.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "mutex.h"

#include <pthread.h>
#include <string.h>

class TestData {
public:
  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
    for (scudo::u32 I = 0; I < Size; I++)
      Data[I] = 0;
  }

  void write() {
    scudo::ScopedLock L(Mutex);
    T V0 = Data[0];
    for (scudo::u32 I = 0; I < Size; I++) {
      EXPECT_EQ(Data[I], V0);
      Data[I]++;
    }
  }

  void tryWrite() {
    if (!Mutex.tryLock())
      return;
    T V0 = Data[0];
    for (scudo::u32 I = 0; I < Size; I++) {
      EXPECT_EQ(Data[I], V0);
      Data[I]++;
    }
    Mutex.unlock();
  }

  void backoff() {
    volatile T LocalData[Size] = {};
    for (scudo::u32 I = 0; I < Size; I++) {
      LocalData[I] = LocalData[I] + 1;
      EXPECT_EQ(LocalData[I], 1U);
    }
  }

private:
  static const scudo::u32 Size = 64U;
  typedef scudo::u64 T;
  scudo::HybridMutex &Mutex;
  alignas(SCUDO_CACHE_LINE_SIZE) T Data[Size];
};

const scudo::u32 NumberOfThreads = 8;
#if SCUDO_DEBUG
const scudo::u32 NumberOfIterations = 4 * 1024;
#else
const scudo::u32 NumberOfIterations = 16 * 1024;
#endif

static void *lockThread(void *Param) {
  TestData *Data = reinterpret_cast<TestData *>(Param);
  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
    Data->write();
    Data->backoff();
  }
  return 0;
}

static void *tryThread(void *Param) {
  TestData *Data = reinterpret_cast<TestData *>(Param);
  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
    Data->tryWrite();
    Data->backoff();
  }
  return 0;
}

TEST(ScudoMutexTest, Mutex) {
  scudo::HybridMutex M;
  TestData Data(M);
  pthread_t Threads[NumberOfThreads];
  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
    pthread_create(&Threads[I], 0, lockThread, &Data);
  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
    pthread_join(Threads[I], 0);
}

TEST(ScudoMutexTest, MutexTry) {
  scudo::HybridMutex M;
  TestData Data(M);
  pthread_t Threads[NumberOfThreads];
  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
    pthread_create(&Threads[I], 0, tryThread, &Data);
  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
    pthread_join(Threads[I], 0);
}

TEST(ScudoMutexTest, MutexAssertHeld) {
  scudo::HybridMutex M;
  M.lock();
  M.assertHeld();
  M.unlock();
}
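MutexTry exercises the non-blocking acquisition path: each thread either takes the lock and mutates the shared array, or skips the write entirely, so the invariant holds without anyone waiting. The same pattern expressed with the standard library, for comparison only (HybridMutex additionally spins before parking, which this sketch does not model):

  #include <mutex>

  std::mutex M;
  int Shared = 0;

  void tryIncrement() {
    if (!M.try_lock())
      return;   // Contended: give up instead of blocking.
    ++Shared;   // Critical section.
    M.unlock();
  }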
441 Telegram/ThirdParty/scudo/tests/primary_test.cpp vendored
@@ -1,441 +0,0 @@
//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "condition_variable.h"
#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <random>
#include <stdlib.h>
#include <thread>
#include <vector>

// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we still
// should ensure the tests pass.

template <typename SizeClassMapT> struct TestConfig1 {
  static const bool MaySupportMemoryTagging = false;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

template <typename SizeClassMapT> struct TestConfig2 {
  static const bool MaySupportMemoryTagging = false;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

template <typename SizeClassMapT> struct TestConfig3 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

template <typename SizeClassMapT> struct TestConfig4 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = 3U;
    static const scudo::uptr GroupSizeLog = 20U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

// This is the only test config that enables the condition variable.
template <typename SizeClassMapT> struct TestConfig5 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const bool UseConditionVariable = true;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig<SizeClassMapT> {};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct SizeClassAllocator
    : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
template <typename SizeClassMapT>
struct SizeClassAllocator<TestConfig1, SizeClassMapT>
    : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
  ~TestAllocator() {
    this->verifyAllBlocksAreReleasedTestOnly();
    this->unmapTestOnly();
  }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <template <typename> class BaseConfig>
struct ScudoPrimaryTest : public Test {};

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <template <typename> class TypeParam>                              \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                          \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <template <typename> class TypeParam>                              \
  void FIXTURE##NAME<TypeParam>::Run()

SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr NumberOfAllocations = 32U;
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = Cache.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      Cache.deallocate(ClassId, Pointers[J]);
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

struct SmallRegionsConfig {
  static const bool MaySupportMemoryTagging = false;

  struct Primary {
    using SizeClassMap = scudo::DefaultSizeClassMap;
    static const scudo::uptr RegionSizeLog = 21U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 20U;
  };
};

// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
  using TransferBatch = Primary::TransferBatchT;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<TransferBatch *> Batches;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];

  for (scudo::uptr I = 0; I < 10000U; I++) {
    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
    if (!B) {
      AllocationFailed = true;
      break;
    }
    for (scudo::u16 J = 0; J < B->getCount(); J++)
      memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
    Batches.push_back(B);
  }
  while (!Batches.empty()) {
    TransferBatch *B = Batches.back();
    Batches.pop_back();
    const scudo::u16 Count = B->getCount();
    B->moveToArray(Blocks);
    Allocator.pushBlocks(&Cache, ClassId, Blocks, Count);
    Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
  }
  Cache.destroy(nullptr);
  Allocator.releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size =
        static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [&V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary = TestAllocator<TypeParam, scudo::Config::Primary::SizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) {
    Threads[I] = std::thread([&]() {
      static thread_local typename Primary::CacheT Cache;
      Cache.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
                                 Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = Cache.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Cache.deallocate(Pair.first, Pair.second);
        V.pop_back();
        // This increases the chance of having non-full TransferBatches and it
        // will jump into the code path of merging TransferBatches.
        if (std::rand() % 8 == 0)
          Cache.drain();
      }
      Cache.destroy(nullptr);
    });
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Allocator->getFragmentationInfo(&Str);
  Str.output();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page worth). This is a regression
// test for an error in how the release criteria were computed.
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll), 0U);
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = 32U;
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);

  // Allocate four times the group size worth of memory, then release all of
  // it. The freed blocks are expected to be classified into groups. Next,
  // allocate one group size worth of memory and expect the maximum address
  // difference between the returned blocks to be at most twice the group
  // size. They need not fall within a single group because the group id is
  // derived by shifting the compact pointer, which, depending on the
  // configuration, may not be aligned to the group size; hence the blocks
  // can span at most two groups.
  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
  std::vector<scudo::uptr> Blocks;
  std::mt19937 R;

  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  std::shuffle(Blocks.begin(), Blocks.end(), R);

  // Release all the allocated blocks, including those held by local cache.
  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();

  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
                *std::min_element(Blocks.begin(), Blocks.end()),
            GroupSizeMem * 2);

  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();
}
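To make the MemoryGroup bound concrete: with a GroupSizeLog of 20 (the value most of the test configs above use), a group covers 1 MiB, so the test allocates 4 MiB worth of 32-byte blocks, frees them all, then reallocates 1 MiB worth and checks that the spread of returned addresses stays within 2 MiB, i.e. at most two groups. The arithmetic, as a worked sketch (the actual numbers depend on the config's GroupSizeLog):

  constexpr unsigned long long GroupSizeLog = 20;                    // From TestConfig2-4.
  constexpr unsigned long long GroupSizeMem = 1ULL << GroupSizeLog;  // 1 MiB per group.
  constexpr unsigned long long Size = 32;                            // Block size in the test.
  constexpr unsigned long long PeakAllocations = 4 * GroupSizeMem / Size;  // 131072 blocks.
  constexpr unsigned long long FinalAllocations = GroupSizeMem / Size;     // 32768 blocks.
  static_assert(2 * GroupSizeMem == (1ULL << 21), "address spread bound: two groups");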
255 Telegram/ThirdParty/scudo/tests/quarantine_test.cpp vendored
@@ -1,255 +0,0 @@
//===-- quarantine_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "quarantine.h"

#include <pthread.h>
#include <stdlib.h>

static void *FakePtr = reinterpret_cast<void *>(0xFA83FA83);
static const scudo::uptr BlockSize = 8UL;
static const scudo::uptr LargeBlockSize = 16384UL;

struct QuarantineCallback {
  void recycle(void *P) { EXPECT_EQ(P, FakePtr); }
  void *allocate(scudo::uptr Size) { return malloc(Size); }
  void deallocate(void *P) { free(P); }
};

typedef scudo::GlobalQuarantine<QuarantineCallback, void> QuarantineT;
typedef typename QuarantineT::CacheT CacheT;

static QuarantineCallback Cb;

static void deallocateCache(CacheT *Cache) {
  while (scudo::QuarantineBatch *Batch = Cache->dequeueBatch())
    Cb.deallocate(Batch);
}

TEST(ScudoQuarantineTest, QuarantineBatchMerge) {
  // Verify the trivial case.
  scudo::QuarantineBatch Into;
  Into.init(FakePtr, 4UL);
  scudo::QuarantineBatch From;
  From.init(FakePtr, 8UL);

  Into.merge(&From);

  EXPECT_EQ(Into.Count, 2UL);
  EXPECT_EQ(Into.Batch[0], FakePtr);
  EXPECT_EQ(Into.Batch[1], FakePtr);
  EXPECT_EQ(Into.Size, 12UL + sizeof(scudo::QuarantineBatch));
  EXPECT_EQ(Into.getQuarantinedSize(), 12UL);

  EXPECT_EQ(From.Count, 0UL);
  EXPECT_EQ(From.Size, sizeof(scudo::QuarantineBatch));
  EXPECT_EQ(From.getQuarantinedSize(), 0UL);

  // Merge the batch to the limit.
  for (scudo::uptr I = 2; I < scudo::QuarantineBatch::MaxCount; ++I)
    From.push_back(FakePtr, 8UL);
  EXPECT_TRUE(Into.Count + From.Count == scudo::QuarantineBatch::MaxCount);
  EXPECT_TRUE(Into.canMerge(&From));

  Into.merge(&From);
  EXPECT_TRUE(Into.Count == scudo::QuarantineBatch::MaxCount);

  // No more space, not even for one element.
  From.init(FakePtr, 8UL);

  EXPECT_FALSE(Into.canMerge(&From));
}

TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesEmpty) {
  CacheT Cache;
  CacheT ToDeallocate;
  Cache.init();
  ToDeallocate.init();
  Cache.mergeBatches(&ToDeallocate);

  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
  EXPECT_EQ(ToDeallocate.dequeueBatch(), nullptr);
}

TEST(SanitizerCommon, QuarantineCacheMergeBatchesOneBatch) {
  CacheT Cache;
  Cache.init();
  Cache.enqueue(Cb, FakePtr, BlockSize);
  EXPECT_EQ(BlockSize + sizeof(scudo::QuarantineBatch), Cache.getSize());

  CacheT ToDeallocate;
  ToDeallocate.init();
  Cache.mergeBatches(&ToDeallocate);

  // Nothing to merge, nothing to deallocate.
  EXPECT_EQ(BlockSize + sizeof(scudo::QuarantineBatch), Cache.getSize());

  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
  EXPECT_EQ(ToDeallocate.dequeueBatch(), nullptr);

  deallocateCache(&Cache);
}

TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesSmallBatches) {
  // Make a Cache with two batches small enough to merge.
  CacheT From;
  From.init();
  From.enqueue(Cb, FakePtr, BlockSize);
  CacheT Cache;
  Cache.init();
  Cache.enqueue(Cb, FakePtr, BlockSize);

  Cache.transfer(&From);
  EXPECT_EQ(BlockSize * 2 + sizeof(scudo::QuarantineBatch) * 2,
            Cache.getSize());

  CacheT ToDeallocate;
  ToDeallocate.init();
  Cache.mergeBatches(&ToDeallocate);

  // Batches merged, one batch to deallocate.
  EXPECT_EQ(BlockSize * 2 + sizeof(scudo::QuarantineBatch), Cache.getSize());
  EXPECT_EQ(ToDeallocate.getSize(), sizeof(scudo::QuarantineBatch));

  deallocateCache(&Cache);
  deallocateCache(&ToDeallocate);
}

TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesTooBigToMerge) {
  const scudo::uptr NumBlocks = scudo::QuarantineBatch::MaxCount - 1;

  // Make a Cache with two batches each too full to merge.
  CacheT From;
  CacheT Cache;
  From.init();
  Cache.init();
  for (scudo::uptr I = 0; I < NumBlocks; ++I) {
    From.enqueue(Cb, FakePtr, BlockSize);
    Cache.enqueue(Cb, FakePtr, BlockSize);
  }
  Cache.transfer(&From);
  EXPECT_EQ(BlockSize * NumBlocks * 2 + sizeof(scudo::QuarantineBatch) * 2,
            Cache.getSize());

  CacheT ToDeallocate;
  ToDeallocate.init();
  Cache.mergeBatches(&ToDeallocate);

  // Batches cannot be merged.
  EXPECT_EQ(BlockSize * NumBlocks * 2 + sizeof(scudo::QuarantineBatch) * 2,
            Cache.getSize());
  EXPECT_EQ(ToDeallocate.getSize(), 0UL);

  deallocateCache(&Cache);
}

TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesALotOfBatches) {
  const scudo::uptr NumBatchesAfterMerge = 3;
  const scudo::uptr NumBlocks =
      scudo::QuarantineBatch::MaxCount * NumBatchesAfterMerge;
  const scudo::uptr NumBatchesBeforeMerge = NumBlocks;

  // Make a Cache with many small batches.
  CacheT Cache;
  Cache.init();
  for (scudo::uptr I = 0; I < NumBlocks; ++I) {
    CacheT From;
    From.init();
    From.enqueue(Cb, FakePtr, BlockSize);
    Cache.transfer(&From);
  }

  EXPECT_EQ(BlockSize * NumBlocks +
                sizeof(scudo::QuarantineBatch) * NumBatchesBeforeMerge,
            Cache.getSize());

  CacheT ToDeallocate;
  ToDeallocate.init();
  Cache.mergeBatches(&ToDeallocate);

  // All blocks should fit into 3 batches.
  EXPECT_EQ(BlockSize * NumBlocks +
                sizeof(scudo::QuarantineBatch) * NumBatchesAfterMerge,
            Cache.getSize());

  EXPECT_EQ(ToDeallocate.getSize(),
            sizeof(scudo::QuarantineBatch) *
                (NumBatchesBeforeMerge - NumBatchesAfterMerge));

  deallocateCache(&Cache);
  deallocateCache(&ToDeallocate);
}

static const scudo::uptr MaxQuarantineSize = 1024UL << 10; // 1MB
static const scudo::uptr MaxCacheSize = 256UL << 10;       // 256KB

TEST(ScudoQuarantineTest, GlobalQuarantine) {
  QuarantineT Quarantine;
  CacheT Cache;
  Cache.init();
  Quarantine.init(MaxQuarantineSize, MaxCacheSize);
  EXPECT_EQ(Quarantine.getMaxSize(), MaxQuarantineSize);
  EXPECT_EQ(Quarantine.getCacheSize(), MaxCacheSize);

  bool DrainOccurred = false;
  scudo::uptr CacheSize = Cache.getSize();
  EXPECT_EQ(Cache.getSize(), 0UL);
  // We quarantine enough blocks that a drain has to occur. Verify this by
  // looking for a decrease in the size of the cache.
  for (scudo::uptr I = 0; I < 128UL; I++) {
    Quarantine.put(&Cache, Cb, FakePtr, LargeBlockSize);
    if (!DrainOccurred && Cache.getSize() < CacheSize)
      DrainOccurred = true;
    CacheSize = Cache.getSize();
  }
  EXPECT_TRUE(DrainOccurred);

  Quarantine.drainAndRecycle(&Cache, Cb);
  EXPECT_EQ(Cache.getSize(), 0UL);

  scudo::ScopedString Str;
  Quarantine.getStats(&Str);
  Str.output();
}

struct PopulateQuarantineThread {
  pthread_t Thread;
  QuarantineT *Quarantine;
  CacheT Cache;
};

void *populateQuarantine(void *Param) {
  PopulateQuarantineThread *P = static_cast<PopulateQuarantineThread *>(Param);
  P->Cache.init();
  for (scudo::uptr I = 0; I < 128UL; I++)
    P->Quarantine->put(&P->Cache, Cb, FakePtr, LargeBlockSize);
  return 0;
}

TEST(ScudoQuarantineTest, ThreadedGlobalQuarantine) {
  QuarantineT Quarantine;
  Quarantine.init(MaxQuarantineSize, MaxCacheSize);

  const scudo::uptr NumberOfThreads = 32U;
  PopulateQuarantineThread T[NumberOfThreads];
  for (scudo::uptr I = 0; I < NumberOfThreads; I++) {
    T[I].Quarantine = &Quarantine;
    pthread_create(&T[I].Thread, 0, populateQuarantine, &T[I]);
  }
  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
    pthread_join(T[I].Thread, 0);

  scudo::ScopedString Str;
  Quarantine.getStats(&Str);
  Str.output();

  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
    Quarantine.drainAndRecycle(&T[I].Cache, Cb);
}
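The GlobalQuarantine test leans on straightforward size arithmetic: 128 puts of LargeBlockSize (16 KiB each) total 2 MiB, which overflows both the 256 KiB per-cache limit and the 1 MiB global limit, so at least one drain is guaranteed to happen. The numbers, checked at compile time:

  constexpr unsigned long long LargeBlockSize = 16384;        // 16 KiB per put.
  constexpr unsigned long long Puts = 128;
  constexpr unsigned long long Total = Puts * LargeBlockSize; // 2 MiB quarantined.
  static_assert(Total > 1024ULL << 10, "exceeds MaxQuarantineSize (1 MiB)");
  static_assert(Total > 256ULL << 10, "exceeds MaxCacheSize (256 KiB)");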
654 Telegram/ThirdParty/scudo/tests/release_test.cpp vendored
@@ -1,654 +0,0 @@
//===-- release_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "list.h"
#include "release.h"
#include "size_class_map.h"

#include <string.h>

#include <algorithm>
#include <random>
#include <set>

TEST(ScudoReleaseTest, RegionPageMap) {
  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
    // Various valid counter max values packed into one word.
    scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
    ASSERT_TRUE(PageMap2N.isAllocated());
    EXPECT_EQ(1U, PageMap2N.getBufferNumElements());
    // Check the "all bits set" values too.
    scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
    ASSERT_TRUE(PageMap2N1_1.isAllocated());
    EXPECT_EQ(1U, PageMap2N1_1.getBufferNumElements());
    // Verify the packing ratio: the counter is expected to be packed into the
    // closest power-of-2 number of bits.
    scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
    ASSERT_TRUE(PageMap.isAllocated());
    EXPECT_EQ(scudo::roundUpPowerOfTwo(I + 1), PageMap.getBufferNumElements());
  }

  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::RegionPageMap PageMap(1U, NumCounters,
                                 1UL << ((1UL << I) - 1));
    ASSERT_TRUE(PageMap.isAllocated());
    PageMap.inc(0U, 0U);
    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.inc(0U, C);
      EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
    PageMap.inc(0U, NumCounters - 1);
    if (I > 0) {
      PageMap.incRange(0u, 0U, NumCounters - 1);
      for (scudo::uptr C = 0; C < NumCounters; C++)
        EXPECT_EQ(2UL, PageMap.get(0U, C));
    }
  }

  // Similar to the above except that we are using incN().
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::uptr MaxValue = 1UL << ((1UL << I) - 1);
    if (MaxValue <= 1U)
      continue;

    scudo::RegionPageMap PageMap(1U, NumCounters, MaxValue);

    scudo::uptr N = MaxValue / 2;
    PageMap.incN(0U, 0, N);
    for (scudo::uptr C = 1; C < NumCounters; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.incN(0U, C, N);
      EXPECT_EQ(N, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(N, PageMap.get(0U, NumCounters - 1));
  }
}
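The packing-ratio check can be made concrete: a counter whose maximum value is 1UL << I needs I + 1 bits, and RegionPageMap rounds that up to the next power of two so counters never straddle a word boundary. A worked instance for I == 5, matching the EXPECT_EQ(scudo::roundUpPowerOfTwo(I + 1), ...) line above under a 64-bit word assumption:

  // Max value 1UL << 5 needs 6 bits; rounded up to 8 bits per counter, so
  // 64 counters (SCUDO_WORDSIZE counters on 64-bit) occupy 64 * 8 / 64 words.
  constexpr unsigned long long I = 5;
  constexpr unsigned long long BitsNeeded = I + 1;            // 6 bits.
  constexpr unsigned long long BitsPacked = 8;                // roundUpPowerOfTwo(6).
  constexpr unsigned long long Words = 64 * BitsPacked / 64;  // 8 buffer elements.
  static_assert(Words == 8, "matches getBufferNumElements() for I == 5");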
class StringRangeRecorder {
|
|
||||||
public:
|
|
||||||
std::string ReportedPages;
|
|
||||||
|
|
||||||
StringRangeRecorder()
|
|
||||||
: PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}
|
|
||||||
|
|
||||||
void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
|
|
||||||
From >>= PageSizeScaledLog;
|
|
||||||
To >>= PageSizeScaledLog;
|
|
||||||
EXPECT_LT(From, To);
|
|
||||||
if (!ReportedPages.empty())
|
|
||||||
EXPECT_LT(LastPageReported, From);
|
|
||||||
ReportedPages.append(From - LastPageReported, '.');
|
|
||||||
ReportedPages.append(To - From, 'x');
|
|
||||||
LastPageReported = To;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
const scudo::uptr PageSizeScaledLog;
|
|
||||||
scudo::uptr LastPageReported = 0;
|
|
||||||
};
|
|
||||||
|
|
||||||
TEST(ScudoReleaseTest, FreePagesRangeTracker) {
  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char *TestCases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };
  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;

  for (auto TestCase : TestCases) {
    StringRangeRecorder Recorder;
    RangeTracker Tracker(Recorder);
    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
      Tracker.processNextPage(TestCase[I] == 'x');
    Tracker.finish();
    // Strip trailing '.'-pages before comparing the results, as they are not
    // going to be reported to the range recorder anyway.
    const char *LastX = strrchr(TestCase, 'x');
    std::string Expected(
        TestCase,
        LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
  }
}

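// A worked trace of the tracker above on the hypothetical input ".x.": the
// tracker reports a single released range covering page 1 only, so the
// recorder builds ".x"; the trailing '.' page is never reported, which is
// exactly why the expected string strips everything after the last 'x'.
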
class ReleasedPagesRecorder {
public:
  ReleasedPagesRecorder() = default;
  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
  std::set<scudo::uptr> ReportedPages;

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    for (scudo::uptr I = From; I < To; I += PageSize)
      ReportedPages.insert(I + getBase());
  }

  scudo::uptr getBase() const { return Base; }
  scudo::uptr Base = 0;
};

// Simplified version of a TransferBatch.
template <class SizeClassMap> struct FreeBatch {
  static const scudo::u16 MaxCount = SizeClassMap::MaxNumCachedHint;
  void clear() { Count = 0; }
  void add(scudo::uptr P) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = P;
  }
  scudo::u16 getCount() const { return Count; }
  scudo::uptr get(scudo::u16 I) const {
    DCHECK_LE(I, Count);
    return Batch[I];
  }
  FreeBatch *Next;

private:
  scudo::uptr Batch[MaxCount];
  scudo::u16 Count;
};

template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PagesCount = 1024;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
  std::mt19937 R;
  scudo::u32 RandState = 42;

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr MaxBlocks = PagesCount * PageSize / BlockSize;

    // Generate the random free list.
    std::vector<scudo::uptr> FreeArray;
    bool InFreeRange = false;
    scudo::uptr CurrentRangeEnd = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      if (I == CurrentRangeEnd) {
        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
      }
      if (InFreeRange)
        FreeArray.push_back(I * BlockSize);
    }
    if (FreeArray.empty())
      continue;
    // Shuffle the array to ensure that the order is irrelevant.
    std::shuffle(FreeArray.begin(), FreeArray.end(), R);

    // Build the FreeList from the FreeArray.
    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();
    Batch *CurrentBatch = nullptr;
    for (auto const &Block : FreeArray) {
      if (!CurrentBatch) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
      if (CurrentBatch->getCount() == Batch::MaxCount)
        CurrentBatch = nullptr;
    }

    // Release the memory.
    auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
    auto DecompactPtr = [](scudo::uptr P) { return P; };
    ReleasedPagesRecorder Recorder;
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
    ASSERT_FALSE(Context.hasBlockMarked());
    Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
                                   /*RegionIndex=*/0, MaxBlocks * BlockSize,
                                   /*MayContainLastBlockInRegion=*/true);
    ASSERT_TRUE(Context.hasBlockMarked());
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    scudo::RegionPageMap &PageMap = Context.PageMap;

    // Verify that no released page is touched by a used chunk, and that every
    // range of free chunks large enough to cover entire memory pages had
    // those pages released.
    scudo::uptr VerifiedReleasedPages = 0;
    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());

    scudo::uptr CurrentBlock = 0;
    InFreeRange = false;
    scudo::uptr CurrentFreeRangeStart = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      const bool IsFreeBlock =
          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
      if (IsFreeBlock) {
        if (!InFreeRange) {
          InFreeRange = true;
          CurrentFreeRangeStart = CurrentBlock;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        const scudo::uptr StartPage = CurrentBlock / PageSize;
        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
                                    Recorder.ReportedPages.end();
          EXPECT_EQ(false, PageReleased);
          EXPECT_EQ(false,
                    PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
        }

        if (InFreeRange) {
          InFreeRange = false;
          // Verify that all the entire memory pages covered by this range of
          // free chunks were released.
          scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
          while (P + PageSize <= CurrentBlock) {
            const bool PageReleased =
                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
            EXPECT_EQ(true, PageReleased);
            EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
            VerifiedReleasedPages++;
            P += PageSize;
          }
        }
      }

      CurrentBlock += BlockSize;
    }

    if (InFreeRange) {
      scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
      const scudo::uptr EndPage =
          scudo::roundUp(MaxBlocks * BlockSize, PageSize);
      while (P + PageSize <= EndPage) {
        const bool PageReleased =
            Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
        EXPECT_EQ(true, PageReleased);
        EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
        VerifiedReleasedPages++;
        P += PageSize;
      }
    }

    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  }
}

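// A condensed sketch of the mark-then-release flow exercised above, under the
// same assumptions as the test (a single region, identity DecompactPtr);
// hypothetical helper, not used by the tests themselves.
template <class SizeClassMap>
static scudo::uptr
countReleasedPages(scudo::SinglyLinkedList<FreeBatch<SizeClassMap>> &FreeList,
                   scudo::uptr BlockSize, scudo::uptr RegionSize) {
  auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
  auto DecompactPtr = [](scudo::uptr P) { return P; };
  ReleasedPagesRecorder Recorder;
  scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                    /*ReleaseSize=*/RegionSize);
  // Step 1: mark the free blocks, then step 2: hand the context to the
  // release routine; the recorder observes every fully-free page once.
  Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
                                 /*RegionIndex=*/0, RegionSize,
                                 /*MayContainLastBlockInRegion=*/true);
  releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
  return Recorder.ReportedPages.size();
}
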
template <class SizeClassMap> void testPageMapMarkRange() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);

    const scudo::uptr GroupNum = 2;
    const scudo::uptr GroupSize = scudo::roundUp(BlockSize, PageSize) * 2;
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(GroupSize * GroupNum, BlockSize);
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    for (scudo::uptr GroupId = 0; GroupId < GroupNum; ++GroupId) {
      const scudo::uptr GroupBeg = GroupId * GroupSize;
      const scudo::uptr GroupEnd = GroupBeg + GroupSize;

      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize);
      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);

      scudo::uptr FirstBlock =
          ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;

      // None of the pages before the first block's page are supposed to be
      // marked.
      if (FirstBlock / PageSize > 0) {
        for (scudo::uptr Page = 0; Page <= FirstBlock / PageSize - 1; ++Page)
          EXPECT_EQ(Context.PageMap.get(/*Region=*/0, Page), 0U);
      }

      // Verify the pages used by the blocks in the group, except that if the
      // end of the last block is not aligned with `GroupEnd`, it'll be
      // verified later.
      scudo::uptr Block;
      for (Block = FirstBlock; Block + BlockSize <= GroupEnd;
           Block += BlockSize) {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          // The first used page in the group has two cases: with or without a
          // block straddling the group boundary.
          if (Page == FirstBlock / PageSize) {
            if (FirstBlock % PageSize == 0) {
              EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0U, Page));
            } else {
              // A block straddles `GroupBeg`; it only increments the counter,
              // so we expect the count to be 1 less (excluding the straddling
              // block) than the total number of blocks on the page.
              EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page),
                        Pages[Page] - 1);
            }
          } else {
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          }
        }
      }

      if (Block == GroupEnd)
        continue;

      // Examine the last block, which sits across the group boundary.
      if (Block + BlockSize == RegionSize) {
        // This is the last block in the region; it's supposed to mark all of
        // its pages as all-counted.
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
        }
      } else {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          if (Page <= (GroupEnd - 1) / PageSize)
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          else
            EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 1U);
        }
      }

      const scudo::uptr FirstUncountedPage =
          scudo::roundUp(Block + BlockSize, PageSize);
      for (scudo::uptr Page = FirstUncountedPage;
           Page <= RoundedRegionSize / PageSize; ++Page) {
        EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 0U);
      }
    } // Iterate each Group

    // Release the entire region. This is to ensure the last page is counted.
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/RegionSize);
    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0,
                                  /*RegionIndex=*/0, RegionSize);
    for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
      EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
  } // Iterate each size class
}

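// A worked instance of the layout above, with hypothetical numbers: for
// BlockSize = 48 and PageSize = 4096, GroupSize = roundUp(48, 4096) * 2 =
// 8192, RegionSize = roundUpSlow(8192 * 2, 48) = 16416 and RoundedRegionSize
// = roundUp(16416, 4096) = 20480, i.e. five pages whose last 4064 bytes are
// alignment padding.
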
template <class SizeClassMap> void testReleasePartialRegion() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    // In the following, we want to ensure the region includes at least 2
    // pages and we will release all the pages except the first one. The
    // handling of the last block is tricky, so we always test the case that
    // includes the last block.
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
                           BlockSize) +
        BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();

    // Skip the blocks in the first page and add the remaining ones.
    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    // This follows the logic of how we count the last page. It should be
    // consistent with how markFreeBlocksInRegion() handles the last block.
    if (RoundedRegionSize % BlockSize != 0)
      ++Pages.back();

    Batch *CurrentBatch = nullptr;
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block < RegionSize; Block += BlockSize) {
      if (CurrentBatch == nullptr ||
          CurrentBatch->getCount() == Batch::MaxCount) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
    }

    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
      ReleasedPagesRecorder Recorder(ReleaseBase);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
      const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);

      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
        if (P < FirstBlock) {
          // If FirstBlock is not aligned with a page boundary, the first
          // touched page will not be released either.
          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
                      Recorder.ReportedPages.end());
        } else {
          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
                      Recorder.ReportedPages.end());
        }
      }
    };

    // Test marking by visiting each block.
    {
      auto DecompactPtr = [](scudo::uptr P) { return P; };
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markFreeBlocksInRegion(FreeList, DecompactPtr, /*Base=*/0U,
                                     /*RegionIndex=*/0, RegionSize,
                                     /*MayContainLastBlockInRegion=*/true);
      for (const Batch &It : FreeList) {
        for (scudo::u16 I = 0; I < It.getCount(); I++) {
          scudo::uptr Block = It.get(I);
          for (scudo::uptr Page = Block / PageSize;
               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
                                                       Page - BasePageOffset));
          }
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Test range marking.
    {
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);
      for (scudo::uptr Page = ReleaseBase / PageSize;
           Page < RoundedRegionSize / PageSize; ++Page) {
        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
            Pages[Page]) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
                                                   Page - BasePageOffset));
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Check the buffer size of the PageMap.
    {
      scudo::PageReleaseContext Full(BlockSize, /*NumberOfRegions=*/1U,
                                     /*ReleaseSize=*/RegionSize);
      Full.ensurePageMapAllocated();
      scudo::PageReleaseContext Partial(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Partial.ensurePageMapAllocated();

      EXPECT_GE(Full.PageMap.getBufferNumElements(),
                Partial.PageMap.getBufferNumElements());
    }

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  } // Iterate each size class
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
}

TEST(ScudoReleaseTest, PageMapMarkRange) {
  testPageMapMarkRange<scudo::DefaultSizeClassMap>();
  testPageMapMarkRange<scudo::AndroidSizeClassMap>();
  testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleasePartialRegion) {
  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
}

template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  // We want to test that a memory group which contains only a single block is
  // handled properly. The case looks like:
  //
  //    From                     To
  //      +----------------------+
  //   +------------+------------+
  //   |            |            |
  //   +------------+------------+
  //                             ^
  //                         RegionSize
  //
  // Note that `From` will be page aligned.
  //
  // If the second-to-last block is aligned at `From`, then we expect all the
  // pages after `From` to be marked as can-be-released. Otherwise, only the
  // pages touched by the last block will be marked as can-be-released.
  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr To =
        From % BlockSize == 0
            ? From + BlockSize
            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);

    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
         Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/To,
                                      /*ReleaseBase=*/0U);
    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
                                  /*RegionSize=*/To);

    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
          Pages[Page / PageSize]) {
        EXPECT_TRUE(
            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
      }
    }
  } // for each size class
}

TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
}

TEST(ScudoReleaseTest, BufferPool) {
  constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
  constexpr scudo::uptr StaticBufferNumElements = 512U;

  // Allocate the buffer pool on the heap because it is quite large (slightly
  // more than StaticBufferCount * StaticBufferNumElements * sizeof(uptr)) and
  // it may not fit on the stack on some platforms.
  using BufferPool =
      scudo::BufferPool<StaticBufferCount, StaticBufferNumElements>;
  std::unique_ptr<BufferPool> Pool(new BufferPool());

  std::vector<BufferPool::Buffer> Buffers;
  for (scudo::uptr I = 0; I < StaticBufferCount; ++I) {
    BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
    EXPECT_TRUE(Pool->isStaticBufferTestOnly(Buffer));
    Buffers.push_back(Buffer);
  }

  // The static buffers are supposed to be used up by now.
  BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
  EXPECT_FALSE(Pool->isStaticBufferTestOnly(Buffer));

  Pool->releaseBuffer(Buffer);
  for (auto &Buffer : Buffers)
    Pool->releaseBuffer(Buffer);
}
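
// Back-of-the-envelope size of the pool above, assuming a 64-bit target
// (SCUDO_WORDSIZE == 64, so StaticBufferCount == 63): 63 buffers of 512
// uptr-sized elements come to 63 * 512 * 8 bytes, roughly 252 KiB, which is
// why the pool lives on the heap rather than on the stack.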

55 Telegram/ThirdParty/scudo/tests/report_test.cpp vendored
@@ -1,55 +0,0 @@
//===-- report_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "report.h"

TEST(ScudoReportDeathTest, Check) {
  CHECK_LT(-1, 1);
  EXPECT_DEATH(CHECK_GT(-1, 1),
               "\\(-1\\) > \\(1\\) \\(\\(u64\\)op1=18446744073709551615, "
               "\\(u64\\)op2=1");
}

TEST(ScudoReportDeathTest, Generic) {
  // Potentially unused if EXPECT_DEATH isn't defined.
  UNUSED void *P = reinterpret_cast<void *>(0x42424242U);
  EXPECT_DEATH(scudo::reportError("TEST123"), "Scudo ERROR.*TEST123");
  EXPECT_DEATH(scudo::reportInvalidFlag("ABC", "DEF"), "Scudo ERROR.*ABC.*DEF");
  EXPECT_DEATH(scudo::reportHeaderCorruption(P), "Scudo ERROR.*42424242");
  EXPECT_DEATH(scudo::reportSanityCheckError("XYZ"), "Scudo ERROR.*XYZ");
  EXPECT_DEATH(scudo::reportAlignmentTooBig(123, 456), "Scudo ERROR.*123.*456");
  EXPECT_DEATH(scudo::reportAllocationSizeTooBig(123, 456, 789),
               "Scudo ERROR.*123.*456.*789");
  EXPECT_DEATH(scudo::reportOutOfMemory(4242), "Scudo ERROR.*4242");
  EXPECT_DEATH(
      scudo::reportInvalidChunkState(scudo::AllocatorAction::Recycling, P),
      "Scudo ERROR.*recycling.*42424242");
  EXPECT_DEATH(
      scudo::reportInvalidChunkState(scudo::AllocatorAction::Sizing, P),
      "Scudo ERROR.*sizing.*42424242");
  EXPECT_DEATH(
      scudo::reportMisalignedPointer(scudo::AllocatorAction::Deallocating, P),
      "Scudo ERROR.*deallocating.*42424242");
  EXPECT_DEATH(scudo::reportDeallocTypeMismatch(
                   scudo::AllocatorAction::Reallocating, P, 0, 1),
               "Scudo ERROR.*reallocating.*42424242");
  EXPECT_DEATH(scudo::reportDeleteSizeMismatch(P, 123, 456),
               "Scudo ERROR.*42424242.*123.*456");
}

TEST(ScudoReportDeathTest, CSpecific) {
  EXPECT_DEATH(scudo::reportAlignmentNotPowerOfTwo(123), "Scudo ERROR.*123");
  EXPECT_DEATH(scudo::reportCallocOverflow(123, 456), "Scudo ERROR.*123.*456");
  EXPECT_DEATH(scudo::reportInvalidPosixMemalignAlignment(789),
               "Scudo ERROR.*789");
  EXPECT_DEATH(scudo::reportPvallocOverflow(123), "Scudo ERROR.*123");
  EXPECT_DEATH(scudo::reportInvalidAlignedAllocAlignment(123, 456),
               "Scudo ERROR.*123.*456");
}

54 Telegram/ThirdParty/scudo/tests/scudo_unit_test.h vendored
@@ -1,54 +0,0 @@
//===-- scudo_unit_test.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_FUCHSIA
#include <zxtest/zxtest.h>
using Test = ::zxtest::Test;
#else
#include "gtest/gtest.h"
using Test = ::testing::Test;
#endif

// If EXPECT_DEATH isn't defined, make it a no-op.
#ifndef EXPECT_DEATH
// If ASSERT_DEATH is defined, make EXPECT_DEATH a wrapper to it.
#ifdef ASSERT_DEATH
#define EXPECT_DEATH(X, Y) ASSERT_DEATH(([&] { X; }), "")
#else
#define EXPECT_DEATH(X, Y)                                                     \
  do {                                                                         \
  } while (0)
#endif // ASSERT_DEATH
#endif // EXPECT_DEATH

// If EXPECT_STREQ isn't defined, define our own simple one.
#ifndef EXPECT_STREQ
#define EXPECT_STREQ(X, Y) EXPECT_EQ(strcmp(X, Y), 0)
#endif

#if SCUDO_FUCHSIA
#define SKIP_ON_FUCHSIA(T) DISABLED_##T
#else
#define SKIP_ON_FUCHSIA(T) T
#endif
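
// Hypothetical usage sketch for the macro above: the DISABLED_ prefix makes
// the test framework register the test but skip running it on Fuchsia.
//   TEST(ScudoExampleTest, SKIP_ON_FUCHSIA(UsesPosixOnlyBehavior)) { ... }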

#if SCUDO_DEBUG
#define SKIP_NO_DEBUG(T) T
#else
#define SKIP_NO_DEBUG(T) DISABLED_##T
#endif

#if SCUDO_FUCHSIA
// The zxtest library provides a default main function that does the same thing
// for Fuchsia builds.
#define SCUDO_NO_TEST_MAIN
#endif

extern bool UseQuarantine;

54 Telegram/ThirdParty/scudo/tests/scudo_unit_test_main.cpp vendored
@@ -1,54 +0,0 @@
//===-- scudo_unit_test_main.cpp --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

// Match Android's default configuration, which disables Scudo's mismatch
// allocation check, as it is triggered by some third-party code.
#if SCUDO_ANDROID
#define DEALLOC_TYPE_MISMATCH "false"
#else
#define DEALLOC_TYPE_MISMATCH "true"
#endif

static void EnableMemoryTaggingIfSupported() {
  if (!scudo::archSupportsMemoryTagging())
    return;
  static bool Done = []() {
    if (!scudo::systemDetectsMemoryTagFaultsTestOnly())
      scudo::enableSystemMemoryTaggingTestOnly();
    return true;
  }();
  (void)Done;
}

// This allows us to turn the Quarantine on or off for specific tests. The
// Quarantine parameters are on the low end, to avoid having to loop
// excessively in some tests.
bool UseQuarantine = true;
extern "C" __attribute__((visibility("default"))) const char *
__scudo_default_options() {
  // The wrapper tests initialize the global allocator early, before main(). We
  // need to have Memory Tagging enabled before that happens or the allocator
  // will disable the feature entirely.
  EnableMemoryTaggingIfSupported();
  if (!UseQuarantine)
    return "dealloc_type_mismatch=" DEALLOC_TYPE_MISMATCH;
  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
         "quarantine_max_chunk_size=512:"
         "dealloc_type_mismatch=" DEALLOC_TYPE_MISMATCH;
}
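
// Under the defaults above (Quarantine enabled, non-Android build), the
// returned option string therefore expands to:
//   "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
//   "quarantine_max_chunk_size=512:dealloc_type_mismatch=true"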

#if !defined(SCUDO_NO_TEST_MAIN)
int main(int argc, char **argv) {
  EnableMemoryTaggingIfSupported();
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
#endif

253 Telegram/ThirdParty/scudo/tests/secondary_test.cpp vendored
@@ -1,253 +0,0 @@
//===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "secondary.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <random>
#include <stdio.h>
#include <thread>
#include <vector>

template <typename Config> static scudo::Options getOptionsForConfig() {
  if (!Config::MaySupportMemoryTagging || !scudo::archSupportsMemoryTagging() ||
      !scudo::systemSupportsMemoryTagging())
    return {};
  scudo::AtomicOptions AO;
  AO.set(scudo::OptionBit::UseMemoryTagging);
  return AO.load();
}

template <typename Config> static void testSecondaryBasic(void) {
  using SecondaryT = scudo::MapAllocator<Config>;
  scudo::Options Options = getOptionsForConfig<Config>();

  scudo::GlobalStats S;
  S.init();
  std::unique_ptr<SecondaryT> L(new SecondaryT);
  L->init(&S);
  const scudo::uptr Size = 1U << 16;
  void *P = L->allocate(Options, Size);
  EXPECT_NE(P, nullptr);
  memset(P, 'A', Size);
  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
  L->deallocate(Options, P);

  // If the Secondary can't cache that pointer, it will be unmapped.
  if (!L->canCache(Size)) {
    EXPECT_DEATH(
        {
          // Repeat a few times to avoid missing the crash if the address gets
          // mmap'ed by unrelated code.
          for (int i = 0; i < 10; ++i) {
            P = L->allocate(Options, Size);
            L->deallocate(Options, P);
            memset(P, 'A', Size);
          }
        },
        "");
  }

  const scudo::uptr Align = 1U << 16;
  P = L->allocate(Options, Size + Align, Align);
  EXPECT_NE(P, nullptr);
  void *AlignedP = reinterpret_cast<void *>(
      scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
  memset(AlignedP, 'A', Size);
  L->deallocate(Options, P);

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(L->allocate(Options, Size));
  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
  while (!V.empty()) {
    L->deallocate(Options, V.back());
    V.pop_back();
  }
  scudo::ScopedString Str;
  L->getStats(&Str);
  Str.output();
  L->unmapTestOnly();
}

struct NoCacheConfig {
  static const bool MaySupportMemoryTagging = false;
  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
};

struct TestConfig {
  static const bool MaySupportMemoryTagging = false;
  struct Secondary {
    struct Cache {
      static const scudo::u32 EntriesArraySize = 128U;
      static const scudo::u32 QuarantineSize = 0U;
      static const scudo::u32 DefaultMaxEntriesCount = 64U;
      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    };

    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
  };
};

TEST(ScudoSecondaryTest, SecondaryBasic) {
  testSecondaryBasic<NoCacheConfig>();
  testSecondaryBasic<scudo::DefaultConfig>();
  testSecondaryBasic<TestConfig>();
}

struct MapAllocatorTest : public Test {
  using Config = scudo::DefaultConfig;
  using LargeAllocator = scudo::MapAllocator<Config>;

  void SetUp() override { Allocator->init(nullptr); }

  void TearDown() override { Allocator->unmapTestOnly(); }

  std::unique_ptr<LargeAllocator> Allocator =
      std::make_unique<LargeAllocator>();
  scudo::Options Options = getOptionsForConfig<Config>();
};

// This exercises a variety of combinations of size and alignment for the
// MapAllocator. The size computation done here mimics the ones done by the
// combined allocator.
TEST_F(MapAllocatorTest, SecondaryCombinations) {
  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
  constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
         AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
        if ((1LL << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr UserSize = scudo::roundUp(
            static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
        const scudo::uptr Size =
            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
        void *P = Allocator->allocate(Options, Size, Align);
        EXPECT_NE(P, nullptr);
        void *AlignedP = reinterpret_cast<void *>(
            scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
        memset(AlignedP, 0xff, UserSize);
        Allocator->deallocate(Options, P);
      }
    }
  }
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}
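
// Worked instance of the size computation above, assuming a 64-bit target
// (MinAlign = 16, HeaderSize = 16): with SizeLog = 16, Delta = 0 and
// AlignLog = 16, UserSize = 65536 and, since Align > MinAlign,
// Size = 16 + 65536 + (65536 - 16) = 131072, which leaves enough slack to
// carve out an aligned user block wherever the mapping lands.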

TEST_F(MapAllocatorTest, SecondaryIterate) {
  std::vector<void *> V;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(Allocator->allocate(
        Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
  auto Lambda = [&V](scudo::uptr Block) {
    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
              V.end());
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  while (!V.empty()) {
    Allocator->deallocate(Options, V.back());
    V.pop_back();
  }
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

TEST_F(MapAllocatorTest, SecondaryOptions) {
  // Attempt to set a maximum number of entries higher than the array size.
  EXPECT_FALSE(
      Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));
  // A negative number will be cast to a scudo::u32, and fail.
  EXPECT_FALSE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, -1));
  if (Allocator->canCache(0U)) {
    // Various valid combinations.
    EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
    EXPECT_TRUE(
        Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
    EXPECT_TRUE(Allocator->canCache(1UL << 18));
    EXPECT_TRUE(
        Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
    EXPECT_FALSE(Allocator->canCache(1UL << 18));
    EXPECT_TRUE(Allocator->canCache(1UL << 16));
    EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
    EXPECT_FALSE(Allocator->canCache(1UL << 16));
    EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
    EXPECT_TRUE(
        Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
    EXPECT_TRUE(Allocator->canCache(1UL << 16));
  }
}

struct MapAllocatorWithReleaseTest : public MapAllocatorTest {
  void SetUp() override { Allocator->init(nullptr, /*ReleaseToOsInterval=*/0); }

  void performAllocations() {
    std::vector<void *> V;
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    {
      std::unique_lock<std::mutex> Lock(Mutex);
      while (!Ready)
        Cv.wait(Lock);
    }
    for (scudo::uptr I = 0; I < 128U; I++) {
      // Deallocate 75% of the blocks.
      const bool Deallocate = (std::rand() & 3) != 0;
      void *P = Allocator->allocate(
          Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
      if (Deallocate)
        Allocator->deallocate(Options, P);
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(Options, V.back());
      V.pop_back();
    }
  }

  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
};

TEST_F(MapAllocatorWithReleaseTest, SecondaryThreadsRace) {
  std::thread Threads[16];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] =
        std::thread(&MapAllocatorWithReleaseTest::performAllocations, this);
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

55 Telegram/ThirdParty/scudo/tests/size_class_map_test.cpp vendored
@@ -1,55 +0,0 @@
//===-- size_class_map_test.cpp ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "size_class_map.h"

template <class SizeClassMap> void testSizeClassMap() {
  typedef SizeClassMap SCMap;
  scudo::printMap<SCMap>();
  scudo::validateMap<SCMap>();
}

TEST(ScudoSizeClassMapTest, DefaultSizeClassMap) {
  testSizeClassMap<scudo::DefaultSizeClassMap>();
}

TEST(ScudoSizeClassMapTest, AndroidSizeClassMap) {
  testSizeClassMap<scudo::AndroidSizeClassMap>();
}

struct OneClassSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 5;
  static const scudo::uptr MidSizeLog = 5;
  static const scudo::uptr MaxSizeLog = 5;
  static const scudo::u16 MaxNumCachedHint = 0;
  static const scudo::uptr MaxBytesCachedLog = 0;
  static const scudo::uptr SizeDelta = 0;
};

TEST(ScudoSizeClassMapTest, OneClassSizeClassMap) {
  testSizeClassMap<scudo::FixedSizeClassMap<OneClassSizeClassConfig>>();
}

#if SCUDO_CAN_USE_PRIMARY64
struct LargeMaxSizeClassConfig {
  static const scudo::uptr NumBits = 3;
  static const scudo::uptr MinSizeLog = 4;
  static const scudo::uptr MidSizeLog = 8;
  static const scudo::uptr MaxSizeLog = 63;
  static const scudo::u16 MaxNumCachedHint = 128;
  static const scudo::uptr MaxBytesCachedLog = 16;
  static const scudo::uptr SizeDelta = 0;
};

TEST(ScudoSizeClassMapTest, LargeMaxSizeClassMap) {
  testSizeClassMap<scudo::FixedSizeClassMap<LargeMaxSizeClassConfig>>();
}
#endif

46 Telegram/ThirdParty/scudo/tests/stats_test.cpp vendored
@@ -1,46 +0,0 @@
//===-- stats_test.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "stats.h"

TEST(ScudoStatsTest, LocalStats) {
  scudo::LocalStats LStats;
  LStats.init();
  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
    EXPECT_EQ(LStats.get(static_cast<scudo::StatType>(I)), 0U);
  LStats.add(scudo::StatAllocated, 4096U);
  EXPECT_EQ(LStats.get(scudo::StatAllocated), 4096U);
  LStats.sub(scudo::StatAllocated, 4096U);
  EXPECT_EQ(LStats.get(scudo::StatAllocated), 0U);
  LStats.set(scudo::StatAllocated, 4096U);
  EXPECT_EQ(LStats.get(scudo::StatAllocated), 4096U);
}

TEST(ScudoStatsTest, GlobalStats) {
  scudo::GlobalStats GStats;
  GStats.init();
  scudo::uptr Counters[scudo::StatCount] = {};
  GStats.get(Counters);
  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
    EXPECT_EQ(Counters[I], 0U);
  scudo::LocalStats LStats;
  LStats.init();
  GStats.link(&LStats);
  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
    LStats.add(static_cast<scudo::StatType>(I), 4096U);
  GStats.get(Counters);
  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
    EXPECT_EQ(Counters[I], 4096U);
  // Unlinking the local stats moves their numbers into the global stats.
  GStats.unlink(&LStats);
  GStats.get(Counters);
  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
    EXPECT_EQ(Counters[I], 4096U);
}

125 Telegram/ThirdParty/scudo/tests/strings_test.cpp vendored
@@ -1,125 +0,0 @@
//===-- strings_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "string_utils.h"

#include <limits.h>

TEST(ScudoStringsTest, Constructor) {
  scudo::ScopedString Str;
  EXPECT_EQ(0ul, Str.length());
  EXPECT_EQ('\0', *Str.data());
}

TEST(ScudoStringsTest, Basic) {
  scudo::ScopedString Str;
  Str.append("a%db%zdc%ue%zuf%xh%zxq%pe%sr", static_cast<int>(-1),
             static_cast<scudo::uptr>(-2), static_cast<unsigned>(-4),
             static_cast<scudo::uptr>(5), static_cast<unsigned>(10),
             static_cast<scudo::uptr>(11), reinterpret_cast<void *>(0x123),
             "_string_");
  EXPECT_EQ(Str.length(), strlen(Str.data()));

  std::string expectedString = "a-1b-2c4294967292e5fahbq0x";
  expectedString += std::string(SCUDO_POINTER_FORMAT_LENGTH - 3, '0');
  expectedString += "123e_string_r";
  EXPECT_EQ(Str.length(), strlen(Str.data()));
  EXPECT_STREQ(expectedString.c_str(), Str.data());
}
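
// Breakdown of the expected string above, conversion by conversion, with the
// literal letters of the format string interleaved between them:
//   %d(-1) -> "-1", %zd(uptr(-2)) -> "-2", %u(unsigned(-4)) -> "4294967292",
//   %zu(5) -> "5", %x(10) -> "a", %zx(11) -> "b",
//   %p(0x123) -> "0x" followed by zero padding and "123", %s -> "_string_".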

TEST(ScudoStringsTest, Clear) {
  scudo::ScopedString Str;
  Str.append("123");
  Str.clear();
  EXPECT_EQ(0ul, Str.length());
  EXPECT_EQ('\0', *Str.data());
}

TEST(ScudoStringsTest, ClearLarge) {
  constexpr char appendString[] = "123";
  scudo::ScopedString Str;
  Str.reserve(sizeof(appendString) * 10000);
  for (int i = 0; i < 10000; ++i)
    Str.append(appendString);
  Str.clear();
  EXPECT_EQ(0ul, Str.length());
  EXPECT_EQ('\0', *Str.data());
}

TEST(ScudoStringsTest, Precision) {
  scudo::ScopedString Str;
  Str.append("%.*s", 3, "12345");
  EXPECT_EQ(Str.length(), strlen(Str.data()));
  EXPECT_STREQ("123", Str.data());
  Str.clear();
  Str.append("%.*s", 6, "12345");
  EXPECT_EQ(Str.length(), strlen(Str.data()));
  EXPECT_STREQ("12345", Str.data());
  Str.clear();
  Str.append("%-6s", "12345");
  EXPECT_EQ(Str.length(), strlen(Str.data()));
  EXPECT_STREQ("12345 ", Str.data());
}

static void fillString(scudo::ScopedString &Str, scudo::uptr Size) {
  for (scudo::uptr I = 0; I < Size; I++)
    Str.append("A");
}

TEST(ScudoStringTest, PotentialOverflows) {
  // Use a ScopedString that spans a page, and attempt to write past the end
  // of it with variations of append. The expectation is for nothing to crash.
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  scudo::ScopedString Str;
  Str.reserve(2 * PageSize);
  Str.clear();
  fillString(Str, 2 * PageSize);
  Str.clear();
  fillString(Str, PageSize - 64);
  Str.append("%-128s", "12345");
  Str.clear();
  fillString(Str, PageSize - 16);
  Str.append("%024x", 12345);
  Str.clear();
  fillString(Str, PageSize - 16);
  Str.append("EEEEEEEEEEEEEEEEEEEEEEEE");
}

template <typename T>
static void testAgainstLibc(const char *Format, T Arg1, T Arg2) {
  scudo::ScopedString Str;
  Str.append(Format, Arg1, Arg2);
  char Buffer[128];
  snprintf(Buffer, sizeof(Buffer), Format, Arg1, Arg2);
  EXPECT_EQ(Str.length(), strlen(Str.data()));
  EXPECT_STREQ(Buffer, Str.data());
}

TEST(ScudoStringsTest, MinMax) {
  testAgainstLibc<int>("%d-%d", INT_MIN, INT_MAX);
  testAgainstLibc<unsigned>("%u-%u", 0, UINT_MAX);
  testAgainstLibc<unsigned>("%x-%x", 0, UINT_MAX);
  testAgainstLibc<long>("%zd-%zd", LONG_MIN, LONG_MAX);
  testAgainstLibc<unsigned long>("%zu-%zu", 0, ULONG_MAX);
  testAgainstLibc<unsigned long>("%zx-%zx", 0, ULONG_MAX);
}

TEST(ScudoStringsTest, Padding) {
  testAgainstLibc<int>("%3d - %3d", 1, 0);
  testAgainstLibc<int>("%3d - %3d", -1, 123);
  testAgainstLibc<int>("%3d - %3d", -1, -123);
  testAgainstLibc<int>("%3d - %3d", 12, 1234);
  testAgainstLibc<int>("%3d - %3d", -12, -1234);
  testAgainstLibc<int>("%03d - %03d", 1, 0);
  testAgainstLibc<int>("%03d - %03d", -1, 123);
  testAgainstLibc<int>("%03d - %03d", -1, -123);
  testAgainstLibc<int>("%03d - %03d", 12, 1234);
  testAgainstLibc<int>("%03d - %03d", -12, -1234);
}

86 Telegram/ThirdParty/scudo/tests/timing_test.cpp vendored
@@ -1,86 +0,0 @@
//===-- timing_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "timing.h"

#include <string>

class ScudoTimingTest : public Test {
public:
  void testFunc1() { scudo::ScopedTimer ST(Manager, __func__); }

  void testFunc2() {
    scudo::ScopedTimer ST(Manager, __func__);
    testFunc1();
  }

  void testChainedCalls() {
    scudo::ScopedTimer ST(Manager, __func__);
    testFunc2();
  }

  void testIgnoredTimer() {
    scudo::ScopedTimer ST(Manager, __func__);
    ST.ignore();
  }

  void printAllTimersStats() { Manager.printAll(); }

  scudo::TimingManager &getTimingManager() { return Manager; }

private:
  scudo::TimingManager Manager;
};

// The timer statistics are dumped through `scudo::Printf`, which is platform
// dependent, so we don't have a reliable way to capture the output and verify
// the details. For now we only verify the number of invocations on Linux.
TEST_F(ScudoTimingTest, SimpleTimer) {
#if SCUDO_LINUX
  testing::internal::LogToStderr();
  testing::internal::CaptureStderr();
#endif

  testIgnoredTimer();
  testChainedCalls();
  printAllTimersStats();

#if SCUDO_LINUX
  std::string output = testing::internal::GetCapturedStderr();
  EXPECT_TRUE(output.find("testIgnoredTimer (1)") == std::string::npos);
  EXPECT_TRUE(output.find("testChainedCalls (1)") != std::string::npos);
  EXPECT_TRUE(output.find("testFunc2 (1)") != std::string::npos);
  EXPECT_TRUE(output.find("testFunc1 (1)") != std::string::npos);
#endif
}
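
// The matched lines have the form "<timer name> (<number of invocations>)",
// e.g. "testFunc2 (1)"; an ignored timer never reaches the report, hence the
// npos check for testIgnoredTimer above.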

TEST_F(ScudoTimingTest, NestedTimer) {
#if SCUDO_LINUX
  testing::internal::LogToStderr();
  testing::internal::CaptureStderr();
#endif

  {
    scudo::ScopedTimer Outer(getTimingManager(), "Outer");
    {
      scudo::ScopedTimer Inner1(getTimingManager(), Outer, "Inner1");
      { scudo::ScopedTimer Inner2(getTimingManager(), Inner1, "Inner2"); }
    }
  }
  printAllTimersStats();

#if SCUDO_LINUX
  std::string output = testing::internal::GetCapturedStderr();
  EXPECT_TRUE(output.find("Outer (1)") != std::string::npos);
  EXPECT_TRUE(output.find("Inner1 (1)") != std::string::npos);
  EXPECT_TRUE(output.find("Inner2 (1)") != std::string::npos);
#endif
}

256 Telegram/ThirdParty/scudo/tests/tsd_test.cpp vendored
@@ -1,256 +0,0 @@
//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <mutex>
#include <set>
#include <thread>

// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr, to be able to test that several
// concurrent threads will not access or modify the same cache at the same time.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache {
    volatile scudo::uptr Canary;
  };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  void initCache(CacheT *Cache) { *Cache = {}; }
  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }

  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};

struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};

TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initOnceMaybe(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}

template <class AllocatorT> static void testRegistry() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->getCache().Canary, 0U);
  if (UnlockRequired)
    TSD->unlock();

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->getCache().Canary, 0U);
  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
  if (UnlockRequired)
    TSD->unlock();
}

TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
  // same for a shared TSD.
  if (!UnlockRequired)
    EXPECT_EQ(TSD->getCache().Canary, 0U);
  // Transform the thread id to a uptr to use it as canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->getCache().Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->getCache().Canary, Canary);
  if (UnlockRequired)
    TSD->unlock();
}

template <class AllocatorT> static void testRegistryThreaded() {
  Ready = false;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}

static std::set<void *> Pointers;

static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
  std::set<void *> Set;
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
  for (scudo::uptr I = 0; I < 4096U; I++) {
    auto TSD = Registry->getTSDAndLock(&UnlockRequired);
    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
    EXPECT_NE(TSD, nullptr);
    Set.insert(reinterpret_cast<void *>(TSD));
    if (UnlockRequired)
      TSD->unlock();
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Pointers.insert(Set.begin(), Set.end());
  }
}

TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // We attempt to use as many TSDs as the shared cache offers by creating a
  // decent amount of threads that will be run concurrently and attempt to get
  // and lock TSDs. We put them all in a set and count the number of entries
  // after we are done.
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}
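A recurring idiom in the threaded tests above is the condition-variable start barrier: workers park on Cv until the main thread flips Ready, so all threads hit the registry at roughly the same time instead of trickling in as std::thread starts them. A distilled, standalone sketch of that idiom (plain C++, no scudo types):

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready = false;

static void worker() {
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready) // loop guards against spurious wakeups
      Cv.wait(Lock);
  }
  // ... every thread reaches the contended section together ...
}

int main() {
  std::vector<std::thread> Threads;
  for (int I = 0; I < 32; I++)
    Threads.emplace_back(worker);
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true; // release all workers at once
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}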
43 Telegram/ThirdParty/scudo/tests/vector_test.cpp vendored
@ -1,43 +0,0 @@
//===-- vector_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "vector.h"

TEST(ScudoVectorTest, Basic) {
  scudo::Vector<int> V;
  EXPECT_EQ(V.size(), 0U);
  V.push_back(42);
  EXPECT_EQ(V.size(), 1U);
  EXPECT_EQ(V[0], 42);
  V.push_back(43);
  EXPECT_EQ(V.size(), 2U);
  EXPECT_EQ(V[0], 42);
  EXPECT_EQ(V[1], 43);
}

TEST(ScudoVectorTest, Stride) {
  scudo::Vector<scudo::uptr> V;
  for (scudo::uptr I = 0; I < 1000; I++) {
    V.push_back(I);
    EXPECT_EQ(V.size(), I + 1U);
    EXPECT_EQ(V[I], I);
  }
  for (scudo::uptr I = 0; I < 1000; I++)
    EXPECT_EQ(V[I], I);
}

TEST(ScudoVectorTest, ResizeReduction) {
  scudo::Vector<int> V;
  V.push_back(0);
  V.push_back(0);
  EXPECT_EQ(V.size(), 2U);
  V.resize(1);
  EXPECT_EQ(V.size(), 1U);
}
663 Telegram/ThirdParty/scudo/tests/wrappers_c_test.cpp vendored
@ -1,663 +0,0 @@
//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "common.h"
#include "memtag.h"
#include "scudo/interface.h"
#include "tests/scudo_unit_test.h"

#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <stdlib.h>
#include <unistd.h>
#include <vector>

#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif

#if SCUDO_FUCHSIA
// Fuchsia only has valloc.
#define HAVE_VALLOC 1
#elif SCUDO_ANDROID
// Android only has pvalloc/valloc on 32 bit.
#if !defined(__LP64__)
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif // !defined(__LP64__)
#else
// All others are assumed to support both functions.
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif

extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);

#ifndef SCUDO_ENABLE_HOOKS_TESTS
#define SCUDO_ENABLE_HOOKS_TESTS 0
#endif

#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
#error "Hooks tests should have hooks enabled as well!"
#endif

struct AllocContext {
  void *Ptr;
  size_t Size;
};
struct DeallocContext {
  void *Ptr;
};
struct ReallocContext {
  void *AllocPtr;
  void *DeallocPtr;
  size_t Size;
};
static AllocContext AC;
static DeallocContext DC;
static ReallocContext RC;

#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
                                                                  size_t Size) {
  AC.Ptr = Ptr;
  AC.Size = Size;
}
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
  DC.Ptr = Ptr;
}
__attribute__((visibility("default"))) void
__scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr, size_t Size) {
  // Verify that __scudo_realloc_deallocate_hook is called first and sets the
  // right pointer.
  EXPECT_EQ(OldPtr, RC.DeallocPtr);
  RC.AllocPtr = NewPtr;
  RC.Size = Size;

  // Note that this is only used for testing. In general, only one pair of hooks
  // will be invoked in `realloc`. If __scudo_realloc_*_hook are not defined,
  // it'll call the general hooks only. To make the test easier, we call the
  // general one here so that either case (whether __scudo_realloc_*_hook are
  // defined) will be verified without separating them into different tests.
  __scudo_allocate_hook(NewPtr, Size);
}
__attribute__((visibility("default"))) void
__scudo_realloc_deallocate_hook(void *Ptr) {
  RC.DeallocPtr = Ptr;

  // See the comment in the __scudo_realloc_allocate_hook above.
  __scudo_deallocate_hook(Ptr);
}
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
}

class ScudoWrappersCTest : public Test {
protected:
  void SetUp() override {
    if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
      printf("Hooks are enabled but hooks tests are disabled.\n");
  }

  void invalidateHookPtrs() {
    if (SCUDO_ENABLE_HOOKS_TESTS) {
      void *InvalidPtr = reinterpret_cast<void *>(0xdeadbeef);
      AC.Ptr = InvalidPtr;
      DC.Ptr = InvalidPtr;
      RC.AllocPtr = RC.DeallocPtr = InvalidPtr;
    }
  }
  void verifyAllocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, AC.Ptr);
  }
  void verifyAllocHookSize(UNUSED size_t Size) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Size, AC.Size);
  }
  void verifyDeallocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, DC.Ptr);
  }
  void verifyReallocHookPtrs(UNUSED void *OldPtr, void *NewPtr, size_t Size) {
    if (SCUDO_ENABLE_HOOKS_TESTS) {
      EXPECT_EQ(OldPtr, RC.DeallocPtr);
      EXPECT_EQ(NewPtr, RC.AllocPtr);
      EXPECT_EQ(Size, RC.Size);
    }
  }
};
using ScudoWrappersCDeathTest = ScudoWrappersCTest;

// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
// But this might also lead to unexpected side effects, since the allocation and
// deallocation operations in the TEST functions will coexist with others (see
// the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
// freed (this depends on the size obviously) and the following free succeeds.

static const size_t Size = 100U;

TEST_F(ScudoWrappersCDeathTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);

  // An update to this warning in Clang now triggers on this line, but that's
  // fine because the check is expecting a bad pointer and should fail.
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic pop
#endif

  free(P);
  verifyDeallocHookPtr(P);
  EXPECT_DEATH(free(P), "");

  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST_F(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);
  verifyDeallocHookPtr(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST_F(ScudoWrappersCTest, SmallAlign) {
  // Allocate pointers with size and alignment at each power of 2 from 1 to
  // 0x10000; memalign requires power-of-2 alignments, and powers of 2 cover a
  // wide range of sizes.
  constexpr size_t MaxSize = 0x10000;
  std::vector<void *> ptrs;
  // Reserve space up front to prevent the vector from allocating during the
  // test.
  ptrs.reserve((scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) *
               (scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) * 3);
  for (size_t Size = 1; Size <= MaxSize; Size <<= 1) {
    for (size_t Align = 1; Align <= MaxSize; Align <<= 1) {
      for (size_t Count = 0; Count < 3; ++Count) {
        void *P = memalign(Align, Size);
        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
        ptrs.push_back(P);
      }
    }
  }
  for (void *ptr : ptrs)
    free(ptr);
}

TEST_F(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size);
    free(P);
    verifyDeallocHookPtr(P);

    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size);
    free(P);
    verifyDeallocHookPtr(P);
  }

  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non power-of-2 alignments, and 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      verifyAllocHookPtr(P);
      verifyAllocHookSize(Size);
      free(P);
      verifyDeallocHookPtr(P);
    }
  }
}

TEST_F(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Alignment * 4U);
  free(P);
  verifyDeallocHookPtr(P);

  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}

TEST_F(ScudoWrappersCDeathTest, Realloc) {
  invalidateHookPtrs();
  // realloc(nullptr, N) is malloc(N).
  void *P = realloc(nullptr, Size);
  EXPECT_NE(P, nullptr);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);
  free(P);
  verifyDeallocHookPtr(P);

  invalidateHookPtrs();
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) is free(P) and returns nullptr.
  EXPECT_EQ(realloc(P, 0U), nullptr);
  verifyDeallocHookPtr(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  invalidateHookPtrs();
  void *OldP = P;
  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  if (OldP == P) {
    verifyDeallocHookPtr(OldP);
    verifyAllocHookPtr(OldP);
  } else {
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size * 2U);
    verifyDeallocHookPtr(OldP);
  }
  verifyReallocHookPtrs(OldP, P, Size * 2U);

  invalidateHookPtrs();
  OldP = P;
  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  if (OldP == P) {
    verifyDeallocHookPtr(OldP);
    verifyAllocHookPtr(OldP);
  } else {
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size / 2U);
  }
  verifyReallocHookPtrs(OldP, P, Size / 2U);
  free(P);

  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}

#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);

  if (SCUDO_ANDROID) {
    EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
    EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
    EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
  }
}
#endif

TEST_F(ScudoWrappersCTest, OtherAlloc) {
#if HAVE_PVALLOC
  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  verifyAllocHookPtr(P);
  // Size will be rounded up to PageSize.
  verifyAllocHookSize(PageSize);
  free(P);
  verifyDeallocHookPtr(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

#if HAVE_VALLOC
  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
#endif
}

template <typename FieldType>
void MallInfoTest() {
  // mallinfo is deprecated.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  const FieldType BypassQuarantineSize = 1024U;
  struct mallinfo MI = mallinfo();
  FieldType Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, static_cast<FieldType>(0));
  FieldType Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
#pragma clang diagnostic pop
}

#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCTest, MallInfo) {
#if SCUDO_ANDROID
  // Android accidentally set the fields to size_t instead of int.
  MallInfoTest<size_t>();
#else
  MallInfoTest<int>();
#endif
}
#endif

#if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
TEST_F(ScudoWrappersCTest, MallInfo2) {
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo2 MI = mallinfo2();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo2();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo2();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
}
#endif

static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
  if (scudo::archSupportsMemoryTagging()) {
    Base = scudo::untagPointer(Base);
    BoundaryP = scudo::untagPointer(BoundaryP);
  }
  if (Base == BoundaryP)
    Count++;
}

// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk for which the backing block will be
// aligned on a page, then run malloc_iterate on both of the pages that the
// block is a boundary for. It must only be seen once by the callback function.
TEST_F(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
#if SCUDO_ANDROID
  // Android uses a 16 byte alignment for both 32 bit and 64 bit.
  const size_t BlockDelta = 16U;
#else
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
#endif
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide. So we need
  // to keep making allocations until we get an allocation that starts exactly
  // on a page boundary. The BlockDelta value is expected to be the number of
  // bytes to subtract from a returned pointer to get to the actual start of
  // the pointer in the size class. In practice, this means BlockDelta should
  // be set to the minimum alignment in bytes for the allocation.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation has
  // a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times is
  // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
  // 65536 tries, give up.
  uintptr_t Block;
  void *P = nullptr;
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}

// Fuchsia doesn't have alarm, fork or malloc_info.
#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
  // We expect heap operations within a disable/enable scope to deadlock.
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}

TEST_F(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}

TEST_F(ScudoWrappersCDeathTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0) << strerror(errno);
  if (Pid == 0) {
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}

static pthread_mutex_t Mutex;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(UNUSED void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable & fork, then enable the allocator again.
  sleep(1);
  malloc_enable();

  return nullptr;
}

TEST_F(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  Ready = false;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, 0), 0);
}

#endif // SCUDO_FUCHSIA
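As a sanity check on the failure-probability estimate quoted in the MallocIterateBoundary comment above, the bound can be recomputed in a few lines (a standalone sketch; the 16-byte delta and 4096-byte page size match the comment's example, not every platform):

#include <cmath>
#include <cstdio>

int main() {
  // Chance that a single allocation misses the page boundary.
  const double PerTryMiss = 1.0 - 16.0 / 4096.0; // 255/256
  // log10 of the probability that all 65536 tries miss.
  const double Log10AllMiss = 65536.0 * std::log10(PerTryMiss);
  std::printf("log10(P[all tries miss]) = %.1f\n", Log10AllMiss);
  // Prints about -111.4, i.e. the all-miss probability is on the order of
  // 10^-112, so giving up after 65536 tries is safe.
}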
273 Telegram/ThirdParty/scudo/tests/wrappers_cpp_test.cpp vendored
@ -1,273 +0,0 @@
//===-- wrappers_cpp_test.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include <atomic>
#include <condition_variable>
#include <fstream>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

// Android does not support checking for new/delete mismatches.
#if SCUDO_ANDROID
#define SKIP_MISMATCH_TESTS 1
#else
#define SKIP_MISMATCH_TESTS 0
#endif

void operator delete(void *, size_t) noexcept;
void operator delete[](void *, size_t) noexcept;

extern "C" {
#ifndef SCUDO_ENABLE_HOOKS_TESTS
#define SCUDO_ENABLE_HOOKS_TESTS 0
#endif

#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
#error "Hooks tests should have hooks enabled as well!"
#endif

struct AllocContext {
  void *Ptr;
  size_t Size;
};
struct DeallocContext {
  void *Ptr;
};
static AllocContext AC;
static DeallocContext DC;

#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
                                                                  size_t Size) {
  AC.Ptr = Ptr;
  AC.Size = Size;
}
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
  DC.Ptr = Ptr;
}
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
}

class ScudoWrappersCppTest : public Test {
protected:
  void SetUp() override {
    if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
      printf("Hooks are enabled but hooks tests are disabled.\n");
  }

  void verifyAllocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, AC.Ptr);
  }
  void verifyAllocHookSize(UNUSED size_t Size) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Size, AC.Size);
  }
  void verifyDeallocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, DC.Ptr);
  }

  template <typename T> void testCxxNew() {
    T *P = new T;
    EXPECT_NE(P, nullptr);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(sizeof(T));
    memset(P, 0x42, sizeof(T));
    EXPECT_DEATH(delete[] P, "");
    delete P;
    verifyDeallocHookPtr(P);
    EXPECT_DEATH(delete P, "");

    P = new T;
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, sizeof(T));
    operator delete(P, sizeof(T));
    verifyDeallocHookPtr(P);

    P = new (std::nothrow) T;
    verifyAllocHookPtr(P);
    verifyAllocHookSize(sizeof(T));
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, sizeof(T));
    delete P;
    verifyDeallocHookPtr(P);

    const size_t N = 16U;
    T *A = new T[N];
    EXPECT_NE(A, nullptr);
    verifyAllocHookPtr(A);
    verifyAllocHookSize(sizeof(T) * N);
    memset(A, 0x42, sizeof(T) * N);
    EXPECT_DEATH(delete A, "");
    delete[] A;
    verifyDeallocHookPtr(A);
    EXPECT_DEATH(delete[] A, "");

    A = new T[N];
    EXPECT_NE(A, nullptr);
    memset(A, 0x42, sizeof(T) * N);
    operator delete[](A, sizeof(T) * N);
    verifyDeallocHookPtr(A);

    A = new (std::nothrow) T[N];
    verifyAllocHookPtr(A);
    verifyAllocHookSize(sizeof(T) * N);
    EXPECT_NE(A, nullptr);
    memset(A, 0x42, sizeof(T) * N);
    delete[] A;
    verifyDeallocHookPtr(A);
  }
};
using ScudoWrappersCppDeathTest = ScudoWrappersCppTest;

class Pixel {
public:
  enum class Color { Red, Green, Blue };
  int X = 0;
  int Y = 0;
  Color C = Color::Red;
};

// Note that every C++ allocation function in the test binary will be fulfilled
// by Scudo. See the comment in the C counterpart of this file.

TEST_F(ScudoWrappersCppDeathTest, New) {
  if (getenv("SKIP_TYPE_MISMATCH") || SKIP_MISMATCH_TESTS) {
    printf("Skipped type mismatch tests.\n");
    return;
  }
  testCxxNew<bool>();
  testCxxNew<uint8_t>();
  testCxxNew<uint16_t>();
  testCxxNew<uint32_t>();
  testCxxNew<uint64_t>();
  testCxxNew<float>();
  testCxxNew<double>();
  testCxxNew<long double>();
  testCxxNew<Pixel>();
}

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

static void stressNew() {
  std::vector<uintptr_t *> V;
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  for (size_t I = 0; I < 256U; I++) {
    const size_t N = static_cast<size_t>(std::rand()) % 128U;
    uintptr_t *P = new uintptr_t[N];
    if (P) {
      memset(P, 0x42, sizeof(uintptr_t) * N);
      V.push_back(P);
    }
  }
  while (!V.empty()) {
    delete[] V.back();
    V.pop_back();
  }
}

TEST_F(ScudoWrappersCppTest, ThreadedNew) {
  // TODO: Investigate why libc sometimes crashes with a tag mismatch in
  // __pthread_clockjoin_ex.
  std::unique_ptr<scudo::ScopedDisableMemoryTagChecks> NoTags;
  if (!SCUDO_ANDROID && scudo::archSupportsMemoryTagging() &&
      scudo::systemSupportsMemoryTagging())
    NoTags = std::make_unique<scudo::ScopedDisableMemoryTagChecks>();

  Ready = false;
  std::thread Threads[32];
  for (size_t I = 0U; I < sizeof(Threads) / sizeof(Threads[0]); I++)
    Threads[I] = std::thread(stressNew);
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCppTest, AllocAfterFork) {
  // This test can fail flakily when run as part of a large number of other
  // tests if the maximum number of mappings allowed is low. We tried to reduce
  // the number of iterations of the loops with moderate success, so we now
  // skip this test under those circumstances.
  if (SCUDO_LINUX) {
    long MaxMapCount = 0;
    // If the file can't be accessed, we proceed with the test.
    std::ifstream Stream("/proc/sys/vm/max_map_count");
    if (Stream.good()) {
      Stream >> MaxMapCount;
      if (MaxMapCount < 200000)
        return;
    }
  }

  std::atomic_bool Stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread *> Threads;
  for (size_t N = 0; N < 5; N++) {
    std::thread *T = new std::thread([&Stop] {
      while (!Stop) {
        for (size_t SizeLog = 3; SizeLog <= 20; SizeLog++) {
          char *P = new char[1UL << SizeLog];
          EXPECT_NE(P, nullptr);
          // Make sure this value is not optimized away.
          asm volatile("" : : "r,m"(P) : "memory");
          delete[] P;
        }
      }
    });
    Threads.push_back(T);
  }

  // Create a thread to fork and allocate.
  for (size_t N = 0; N < 50; N++) {
    pid_t Pid;
    if ((Pid = fork()) == 0) {
      for (size_t SizeLog = 3; SizeLog <= 20; SizeLog++) {
        char *P = new char[1UL << SizeLog];
        EXPECT_NE(P, nullptr);
        // Make sure this value is not optimized away.
        asm volatile("" : : "r,m"(P) : "memory");
        // Make sure we can touch all of the allocation.
        memset(P, 0x32, 1U << SizeLog);
        // EXPECT_LE(1U << SizeLog, malloc_usable_size(ptr));
        delete[] P;
      }
      _exit(10);
    }
    EXPECT_NE(-1, Pid);
    int Status;
    EXPECT_EQ(Pid, waitpid(Pid, &Status, 0));
    EXPECT_FALSE(WIFSIGNALED(Status));
    EXPECT_EQ(10, WEXITSTATUS(Status));
  }

  printf("Waiting for threads to complete\n");
  Stop = true;
  for (auto Thread : Threads)
    Thread->join();
  Threads.clear();
}
#endif
70 Telegram/ThirdParty/scudo/thread_annotations.h vendored
@ -1,70 +0,0 @@
//===-- thread_annotations.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_THREAD_ANNOTATIONS_
#define SCUDO_THREAD_ANNOTATIONS_

// Enable thread safety attributes only with clang.
// The attributes can be safely ignored when compiling with other compilers.
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
#endif

#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))

#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)

#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))

#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))

#define ACQUIRED_BEFORE(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))

#define ACQUIRED_AFTER(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))

#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))

#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))

#define ACQUIRE(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))

#define ACQUIRE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))

#define RELEASE(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))

#define RELEASE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))

#define TRY_ACQUIRE(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))

#define TRY_ACQUIRE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))

#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))

#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))

#define ASSERT_SHARED_CAPABILITY(x) \
  THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))

#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))

#define NO_THREAD_SAFETY_ANALYSIS \
  THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)

#endif // SCUDO_THREAD_ANNOTATIONS_
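These macros expand to Clang's thread-safety attributes and compile away under other compilers. A minimal sketch of how they are consumed, in the style timing.h later in this diff uses them (a standalone illustration, not scudo code):

#include "thread_annotations.h"

#include <mutex>

// A toy capability: wrap std::mutex so the analysis can track it.
class CAPABILITY("mutex") Mutex {
public:
  void lock() ACQUIRE() { M.lock(); }
  void unlock() RELEASE() { M.unlock(); }

private:
  std::mutex M;
};

class Counter {
public:
  void increment() EXCLUDES(Mu) {
    Mu.lock();
    ++Value; // OK: the analysis sees Mu is held here.
    Mu.unlock();
  }

private:
  Mutex Mu;
  int Value GUARDED_BY(Mu) = 0;
};

Building with clang and -Wthread-safety flags an unlocked access to Value at compile time; other compilers see plain C++.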
29 Telegram/ThirdParty/scudo/timing.cpp vendored
@ -1,29 +0,0 @@
//===-- timing.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "timing.h"

namespace scudo {

Timer::~Timer() {
  if (Manager)
    Manager->report(*this);
}

ScopedTimer::ScopedTimer(TimingManager &Manager, const char *Name)
    : Timer(Manager.getOrCreateTimer(Name)) {
  start();
}

ScopedTimer::ScopedTimer(TimingManager &Manager, const Timer &Nest,
                         const char *Name)
    : Timer(Manager.nest(Nest, Name)) {
  start();
}

} // namespace scudo
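A subtlety in Timer::~Timer above: each timer reports at most once, because the move constructor (see timing.h, next in this diff) clears the source's Manager, and ignore() does the same to opt out. A simplified standalone analogue of that hand-off (illustrative stand-ins, not the scudo types):

#include <cstdio>
#include <utility>

struct Reporter {}; // stands in for scudo::TimingManager

struct Tracked { // stands in for scudo::Timer
  Reporter *Manager = nullptr;
  explicit Tracked(Reporter *M) : Manager(M) {}
  Tracked(Tracked &&Other) : Manager(Other.Manager) {
    Other.Manager = nullptr; // the moved-from timer will not report
  }
  ~Tracked() {
    if (Manager) // mirrors the check in Timer::~Timer above
      std::printf("reported once\n");
  }
};

int main() {
  Reporter R;
  Tracked A(&R);
  Tracked B(std::move(A)); // only B reports; A's Manager is now null
} // prints "reported once" exactly once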
221 Telegram/ThirdParty/scudo/timing.h vendored
@ -1,221 +0,0 @@
//===-- timing.h ------------------------------------------------*- C++ -*-===//
|
|
||||||
//
|
|
||||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
||||||
// See https://llvm.org/LICENSE.txt for license information.
|
|
||||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
||||||
//
|
|
||||||
//===----------------------------------------------------------------------===//
|
|
||||||
|
|
||||||
#ifndef SCUDO_TIMING_H_
|
|
||||||
#define SCUDO_TIMING_H_
|
|
||||||
|
|
||||||
#include "common.h"
|
|
||||||
#include "mutex.h"
|
|
||||||
#include "string_utils.h"
|
|
||||||
#include "thread_annotations.h"
|
|
||||||
|
|
||||||
#include <inttypes.h>
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
namespace scudo {
|
|
||||||
|
|
||||||
class TimingManager;
|
|
||||||
|
|
||||||
// A simple timer for evaluating execution time of code snippets. It can be used
|
|
||||||
// along with TimingManager or standalone.
|
|
||||||
class Timer {
|
|
||||||
public:
|
|
||||||
// The use of Timer without binding to a TimingManager is supposed to do the
|
|
||||||
// timer logging manually. Otherwise, TimingManager will do the logging stuff
|
|
||||||
// for you.
|
|
||||||
Timer() = default;
|
|
||||||
Timer(Timer &&Other)
|
|
||||||
: StartTime(0), AccTime(Other.AccTime), Manager(Other.Manager),
|
|
||||||
HandleId(Other.HandleId) {
|
|
||||||
Other.Manager = nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
Timer(const Timer &) = delete;
|
|
||||||
|
|
||||||
~Timer();
|
|
||||||
|
|
||||||
void start() {
|
|
||||||
CHECK_EQ(StartTime, 0U);
|
|
||||||
StartTime = getMonotonicTime();
|
|
||||||
}
|
|
||||||
void stop() {
|
|
||||||
AccTime += getMonotonicTime() - StartTime;
|
|
||||||
StartTime = 0;
|
|
||||||
}
|
|
||||||
u64 getAccumulatedTime() const { return AccTime; }
|
|
||||||
|
|
||||||
// Unset the bound TimingManager so that we don't report the data back. This
|
|
||||||
// is useful if we only want to track subset of certain scope events.
|
|
||||||
void ignore() {
|
|
||||||
StartTime = 0;
|
|
||||||
AccTime = 0;
|
|
||||||
Manager = nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected:
|
|
||||||
friend class TimingManager;
|
|
||||||
Timer(TimingManager &Manager, u32 HandleId)
|
|
||||||
: Manager(&Manager), HandleId(HandleId) {}
|
|
||||||
|
|
||||||
u64 StartTime = 0;
|
|
||||||
u64 AccTime = 0;
|
|
||||||
TimingManager *Manager = nullptr;
|
|
||||||
u32 HandleId;
|
|
||||||
};
|
|
||||||
|
|
||||||
// A RAII-style wrapper for easy scope execution measurement. Note that in order
|
|
||||||
// not to take additional space for the message like `Name`. It only works with
|
|
||||||
// TimingManager.
|
|
||||||
class ScopedTimer : public Timer {
|
|
||||||
public:
|
|
||||||
ScopedTimer(TimingManager &Manager, const char *Name);
|
|
||||||
ScopedTimer(TimingManager &Manager, const Timer &Nest, const char *Name);
  ~ScopedTimer() { stop(); }
};

// In Scudo, the execution time of a single run of code snippets may not be
// useful; we are more interested in the average time over several runs.
// TimingManager lets the registered timers report their data and reports the
// average execution time for each timer periodically.
class TimingManager {
public:
  TimingManager(u32 PrintingInterval = DefaultPrintingInterval)
      : PrintingInterval(PrintingInterval) {}
  ~TimingManager() {
    if (NumAllocatedTimers != 0)
      printAll();
  }

  Timer getOrCreateTimer(const char *Name) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);

    CHECK_LT(strlen(Name), MaxLenOfTimerName);
    for (u32 I = 0; I < NumAllocatedTimers; ++I) {
      if (strncmp(Name, Timers[I].Name, MaxLenOfTimerName) == 0)
        return Timer(*this, I);
    }

    CHECK_LT(NumAllocatedTimers, MaxNumberOfTimers);
    strncpy(Timers[NumAllocatedTimers].Name, Name, MaxLenOfTimerName);
    TimerRecords[NumAllocatedTimers].AccumulatedTime = 0;
    TimerRecords[NumAllocatedTimers].Occurrence = 0;
    return Timer(*this, NumAllocatedTimers++);
  }

  // Add a sub-Timer associated with another Timer. This is used when we want
  // to break down the execution time within the scope of a Timer.
  // For example,
  //   void Foo() {
  //     // T1 records the time spent in both the first and second tasks.
  //     ScopedTimer T1(getTimingManager(), "Task1");
  //     {
  //       // T2 records the time spent in the first task.
  //       ScopedTimer T2(getTimingManager(), T1, "Task2");
  //       // Do first task.
  //     }
  //     // Do second task.
  //   }
  //
  // The report will show proper indents to indicate the nested relation like,
  //   -- Average Operation Time -- -- Name (# of Calls) --
  //             10.0(ns)            Task1 (1)
  //              5.0(ns)              Task2 (1)
  Timer nest(const Timer &T, const char *Name) EXCLUDES(Mutex) {
    CHECK_EQ(T.Manager, this);
    Timer Nesting = getOrCreateTimer(Name);

    ScopedLock L(Mutex);
    CHECK_NE(Nesting.HandleId, T.HandleId);
    Timers[Nesting.HandleId].Nesting = T.HandleId;
    return Nesting;
  }

  void report(const Timer &T) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);

    const u32 HandleId = T.HandleId;
    CHECK_LT(HandleId, MaxNumberOfTimers);
    TimerRecords[HandleId].AccumulatedTime += T.getAccumulatedTime();
    ++TimerRecords[HandleId].Occurrence;
    ++NumEventsReported;
    if (NumEventsReported % PrintingInterval == 0)
      printAllImpl();
  }

  void printAll() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    printAllImpl();
  }

private:
  void printAllImpl() REQUIRES(Mutex) {
    static char NameHeader[] = "-- Name (# of Calls) --";
    static char AvgHeader[] = "-- Average Operation Time --";
    ScopedString Str;
    Str.append("%-15s %-15s\n", AvgHeader, NameHeader);

    for (u32 I = 0; I < NumAllocatedTimers; ++I) {
      if (Timers[I].Nesting != MaxNumberOfTimers)
        continue;
      printImpl(Str, I);
    }

    Str.output();
  }

  void printImpl(ScopedString &Str, const u32 HandleId,
                 const u32 ExtraIndent = 0) REQUIRES(Mutex) {
    const u64 AccumulatedTime = TimerRecords[HandleId].AccumulatedTime;
    const u64 Occurrence = TimerRecords[HandleId].Occurrence;
    const u64 Integral = Occurrence == 0 ? 0 : AccumulatedTime / Occurrence;
    // Keeping a single digit of the fraction is enough, and it enables easier
    // layout maintenance.
    const u64 Fraction =
        Occurrence == 0 ? 0
                        : ((AccumulatedTime % Occurrence) * 10) / Occurrence;

    Str.append("%14" PRId64 ".%" PRId64 "(ns) %-11s", Integral, Fraction, " ");

    for (u32 I = 0; I < ExtraIndent; ++I)
      Str.append("%s", " ");
    Str.append("%s (%" PRId64 ")\n", Timers[HandleId].Name, Occurrence);

    for (u32 I = 0; I < NumAllocatedTimers; ++I)
      if (Timers[I].Nesting == HandleId)
        printImpl(Str, I, ExtraIndent + 1);
  }

  // Instead of maintaining pages for timer registration, a static buffer is
  // sufficient for most use cases in Scudo.
  static constexpr u32 MaxNumberOfTimers = 50;
  static constexpr u32 MaxLenOfTimerName = 50;
  static constexpr u32 DefaultPrintingInterval = 100;

  struct Record {
    u64 AccumulatedTime = 0;
    u64 Occurrence = 0;
  };

  struct TimerInfo {
    char Name[MaxLenOfTimerName + 1];
    u32 Nesting = MaxNumberOfTimers;
  };

  HybridMutex Mutex;
  // The frequency of proactively dumping the timer statistics. For example,
  // the default setting is to dump the statistics every 100 reported events.
  u32 PrintingInterval GUARDED_BY(Mutex);
  u64 NumEventsReported GUARDED_BY(Mutex) = 0;
  u32 NumAllocatedTimers GUARDED_BY(Mutex) = 0;
  TimerInfo Timers[MaxNumberOfTimers] GUARDED_BY(Mutex);
  Record TimerRecords[MaxNumberOfTimers] GUARDED_BY(Mutex);
};

} // namespace scudo

#endif // SCUDO_TIMING_H_
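For reference, a minimal sketch of how these classes are meant to be used, mirroring the example in the `nest()` comment above (hedged: `getTimingManager()` is a hypothetical accessor returning a `TimingManager &`; this is not code from this repository):

void Foo() {
  // Reports to Task1 on scope exit.
  scudo::ScopedTimer T1(getTimingManager(), "Task1");
  {
    // Nested under Task1 via TimingManager::nest().
    scudo::ScopedTimer T2(getTimingManager(), T1, "Task2");
    // ... first task ...
  }
  // ... second task ...
}
// Averages are printed every DefaultPrintingInterval (100) reported events,
// and once more in ~TimingManager().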
@@ -1,162 +0,0 @@
//===-- compute_size_class_config.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <vector>

struct Alloc {
  size_t size, count;
};

size_t measureWastage(const std::vector<Alloc> &allocs,
                      const std::vector<size_t> &classes, size_t pageSize,
                      size_t headerSize) {
  size_t totalWastage = 0;
  for (auto &a : allocs) {
    size_t sizePlusHeader = a.size + headerSize;
    size_t wastage = -1ull;
    for (auto c : classes)
      if (c >= sizePlusHeader && c - sizePlusHeader < wastage)
        wastage = c - sizePlusHeader;
    if (wastage == -1ull)
      continue;
    if (wastage > 2 * pageSize)
      wastage = 2 * pageSize;
    totalWastage += wastage * a.count;
  }
  return totalWastage;
}

void readAllocs(std::vector<Alloc> &allocs, const char *path) {
  FILE *f = fopen(path, "r");
  if (!f) {
    fprintf(stderr, "compute_size_class_config: could not open %s: %s\n", path,
            strerror(errno));
    exit(1);
  }

  const char header[] = "<malloc version=\"scudo-1\">\n";
  char buf[sizeof(header) - 1];
  if (fread(buf, 1, sizeof(header) - 1, f) != sizeof(header) - 1 ||
      memcmp(buf, header, sizeof(header) - 1) != 0) {
    fprintf(stderr, "compute_size_class_config: invalid input format\n");
    exit(1);
  }

  Alloc a;
  while (fscanf(f, "<alloc size=\"%zu\" count=\"%zu\"/>\n", &a.size,
                &a.count) == 2)
    allocs.push_back(a);
  fclose(f);
}

size_t log2Floor(size_t x) { return sizeof(long) * 8 - 1 - __builtin_clzl(x); }

void usage() {
  fprintf(stderr,
          "usage: compute_size_class_config [-p pageSize] [-c largestClass] "
          "[-h headerSize] [-n numClasses] [-b numBits] profile...\n");
  exit(1);
}

int main(int argc, char **argv) {
  size_t pageSize = 4096;
  size_t largestClass = 65552;
  size_t headerSize = 16;
  size_t numClasses = 32;
  size_t numBits = 5;

  std::vector<Alloc> allocs;
  for (size_t i = 1; i != argc;) {
    auto matchArg = [&](size_t &arg, const char *name) {
      if (strcmp(argv[i], name) == 0) {
        if (i + 1 != argc) {
          arg = atoi(argv[i + 1]);
          i += 2;
        } else {
          usage();
        }
        return true;
      }
      return false;
    };
    if (matchArg(pageSize, "-p") || matchArg(largestClass, "-c") ||
        matchArg(headerSize, "-h") || matchArg(numClasses, "-n") ||
        matchArg(numBits, "-b"))
      continue;
    readAllocs(allocs, argv[i]);
    ++i;
  }

  if (allocs.empty())
    usage();

  std::vector<size_t> classes;
  classes.push_back(largestClass);
  for (size_t i = 1; i != numClasses; ++i) {
    size_t minWastage = -1ull;
    size_t minWastageClass = 0;
    for (size_t newClass = 16; newClass != largestClass; newClass += 16) {
      // Skip classes with more than numBits bits, ignoring leading or trailing
      // zero bits.
      if (__builtin_ctzl(newClass - headerSize) +
              __builtin_clzl(newClass - headerSize) <
          sizeof(long) * 8 - numBits)
        continue;

      classes.push_back(newClass);
      size_t newWastage = measureWastage(allocs, classes, pageSize, headerSize);
      classes.pop_back();
      if (newWastage < minWastage) {
        minWastage = newWastage;
        minWastageClass = newClass;
      }
    }
    classes.push_back(minWastageClass);
  }

  std::sort(classes.begin(), classes.end());
  size_t minSizeLog = log2Floor(headerSize);
  size_t midSizeIndex = 0;
  while (classes[midSizeIndex + 1] - classes[midSizeIndex] == (1 << minSizeLog))
    midSizeIndex++;
  size_t midSizeLog = log2Floor(classes[midSizeIndex] - headerSize);
  size_t maxSizeLog = log2Floor(classes.back() - headerSize - 1) + 1;

  printf(R"(// wastage = %zu

struct MySizeClassConfig {
  static const uptr NumBits = %zu;
  static const uptr MinSizeLog = %zu;
  static const uptr MidSizeLog = %zu;
  static const uptr MaxSizeLog = %zu;
  static const u16 MaxNumCachedHint = 14;
  static const uptr MaxBytesCachedLog = 14;

  static constexpr u32 Classes[] = {)",
         measureWastage(allocs, classes, pageSize, headerSize), numBits,
         minSizeLog, midSizeLog, maxSizeLog);
  for (size_t i = 0; i != classes.size(); ++i) {
    if ((i % 8) == 0)
      printf("\n ");
    else
      printf(" ");
    printf("0x%05zx,", classes[i]);
  }
  printf(R"(
  };
  static const uptr SizeDelta = %zu;
};
)",
         headerSize);
}
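Taken together with `readAllocs()` and `usage()` above, the tool consumes one or more malloc profiles and prints a candidate size-class configuration. A hypothetical profile and invocation (the file name and numbers are illustrative only):

$ cat profile.xml
<malloc version="scudo-1">
<alloc size="24" count="19000"/>
<alloc size="64" count="7200"/>
<alloc size="129" count="310"/>
$ compute_size_class_config -p 4096 -h 16 -n 32 -b 5 profile.xml
// wastage = ...
struct MySizeClassConfig { ... };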
118  Telegram/ThirdParty/scudo/trusty.cpp  vendored
@@ -1,118 +0,0 @@
//===-- trusty.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_TRUSTY

#include "common.h"
#include "mutex.h"
#include "report_linux.h"
#include "trusty.h"

#include <errno.h>           // for errno
#include <lk/err_ptr.h>      // for PTR_ERR and IS_ERR
#include <stdio.h>           // for printf()
#include <stdlib.h>          // for getenv()
#include <sys/auxv.h>        // for getauxval()
#include <time.h>            // for clock_gettime()
#include <trusty_err.h>      // for lk_err_to_errno()
#include <trusty_syscalls.h> // for _trusty_brk()
#include <uapi/mm.h>         // for MMAP flags

namespace scudo {

uptr getPageSize() { return getauxval(AT_PAGESZ); }

void NORETURN die() { abort(); }

void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
          UNUSED MapPlatformData *Data) {
  uint32_t MmapFlags =
      MMAP_FLAG_ANONYMOUS | MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE;

  // If the MAP_NOACCESS flag is set, Scudo tries to reserve
  // a memory region without mapping physical pages. This corresponds
  // to MMAP_FLAG_NO_PHYSICAL in Trusty.
  if (Flags & MAP_NOACCESS)
    MmapFlags |= MMAP_FLAG_NO_PHYSICAL;
  if (Addr)
    MmapFlags |= MMAP_FLAG_FIXED_NOREPLACE;

  if (Flags & MAP_MEMTAG)
    MmapFlags |= MMAP_FLAG_PROT_MTE;

  void *P = (void *)_trusty_mmap(Addr, Size, MmapFlags, 0);

  if (IS_ERR(P)) {
    errno = lk_err_to_errno(PTR_ERR(P));
    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
      reportMapError(Size);
    return nullptr;
  }

  return P;
}

void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
           UNUSED MapPlatformData *Data) {
  if (_trusty_munmap(Addr, Size) != 0)
    reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
}

void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
                         UNUSED MapPlatformData *Data) {}

void releasePagesToOS(UNUSED uptr BaseAddress, UNUSED uptr Offset,
                      UNUSED uptr Size, UNUSED MapPlatformData *Data) {}

const char *getEnv(const char *Name) { return getenv(Name); }

// All mutex operations are a no-op since Trusty doesn't currently support
// threads.
bool HybridMutex::tryLock() { return true; }

void HybridMutex::lockSlow() {}

void HybridMutex::unlock() {}

void HybridMutex::assertHeldImpl() {}

u64 getMonotonicTime() {
  timespec TS;
  clock_gettime(CLOCK_MONOTONIC, &TS);
  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
         static_cast<u64>(TS.tv_nsec);
}

u64 getMonotonicTimeFast() {
#if defined(CLOCK_MONOTONIC_COARSE)
  timespec TS;
  clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
         static_cast<u64>(TS.tv_nsec);
#else
  return getMonotonicTime();
#endif
}

u32 getNumberOfCPUs() { return 0; }

u32 getThreadID() { return 0; }

bool getRandom(UNUSED void *Buffer, UNUSED uptr Length, UNUSED bool Blocking) {
  return false;
}

void outputRaw(const char *Buffer) { printf("%s", Buffer); }

void setAbortMessage(UNUSED const char *Message) {}

} // namespace scudo

#endif // SCUDO_TRUSTY
24  Telegram/ThirdParty/scudo/trusty.h  vendored
@@ -1,24 +0,0 @@
//===-- trusty.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TRUSTY_H_
#define SCUDO_TRUSTY_H_

#include "platform.h"

#if SCUDO_TRUSTY

namespace scudo {
// MapPlatformData is unused on Trusty, define it as a minimally sized
// structure.
struct MapPlatformData {};
} // namespace scudo

#endif // SCUDO_TRUSTY

#endif // SCUDO_TRUSTY_H_
90  Telegram/ThirdParty/scudo/tsd.h  vendored
@@ -1,90 +0,0 @@
//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_

#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>

// With some build setups, this might still not be defined.
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#define PTHREAD_DESTRUCTOR_ITERATIONS 4
#endif

namespace scudo {

template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
  using ThisT = TSD<Allocator>;
  u8 DestructorIterations = 0;

  void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(DestructorIterations, 0U);
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    Instance->initCache(&Cache);
    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
  }

  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
    if (Mutex.tryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence,
                           static_cast<uptr>(getMonotonicTimeFast() >>
                                             FIRST_32_SECOND_64(16, 0)));
    return false;
  }
  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.lock();
  }
  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }

  void commitBack(Allocator *Instance) { Instance->commitBack(this); }

  // As described in the comments attached to `getCache()`, the TSD doesn't
  // always need to be locked. In that case, we only skip the check until we
  // have all TSDs locked on all paths.
  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
    if (SCUDO_DEBUG && !BypassCheck)
      Mutex.assertHeld();
  }

  // Ideally, we may want to assert that all the operations on
  // Cache/QuarantineCache always have the `Mutex` acquired. However, the
  // current architecture of accessing TSD is not easy to cooperate with the
  // thread-safety analysis because of pointer aliasing. So now we just add the
  // assertion on the getters of Cache/QuarantineCache.
  //
  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
  // TSD doesn't always require holding the lock. Add this assertion while the
  // lock is always acquired.
  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
    return QuarantineCache;
  }

private:
  HybridMutex Mutex;
  atomic_uptr Precedence = {};

  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};

} // namespace scudo

#endif // SCUDO_TSD_H_
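The `Precedence` field above implements a small contention heuristic: a failed `tryLock()` stamps a coarse timestamp (shifted right by 16 bits on 32-bit targets so it fits an `uptr`), and the shared registry's slow path later prefers the TSD that has been contended the longest. A standalone sketch of the idea (illustrative only, not Scudo code):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-in for a TSD slot: a failed tryLock stamps the current time so a
// registry can later pick the slot that has been waiting the longest.
struct Slot {
  bool tryLock(uint64_t Now) {
    if (!Busy.exchange(true)) {
      Precedence.store(0, std::memory_order_relaxed); // acquired: clear stamp
      return true;
    }
    uint64_t Expected = 0; // only the first failing thread stamps the time
    Precedence.compare_exchange_strong(Expected, Now,
                                       std::memory_order_relaxed);
    return false;
  }
  void unlock() { Busy.store(false); }
  uint64_t precedence() const {
    return Precedence.load(std::memory_order_relaxed);
  }

private:
  std::atomic<bool> Busy{false};
  std::atomic<uint64_t> Precedence{0};
};

int main() {
  Slot S;
  S.tryLock(/*Now=*/100); // succeeds; Precedence stays 0
  S.tryLock(/*Now=*/105); // fails; stamps 105
  std::printf("precedence=%llu\n",
              static_cast<unsigned long long>(S.precedence()));
  S.unlock();
  return 0;
}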
178  Telegram/ThirdParty/scudo/tsd_exclusive.h  vendored
@@ -1,178 +0,0 @@
//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

#include "string_utils.h"

namespace scudo {

struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate all thread local `ThreadTSD`s. Simply
    // drain the `ThreadTSD` of the current thread and `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
  // embedding the logic into TSD or always locking the TSD. It will enable us
  // to properly mark the thread annotations here and to add proper runtime
  // assertions in the member functions of TSD. For example, assert the lock is
  // acquired before calling TSD::commitBack().
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate all thread local `ThreadTSD`s. Instead of
    // printing only the current thread's `ThreadTSD`, which could mislead the
    // reader, we just skip it.
    Str->append("Exclusive TSDs don't support iterating over each TSD\n");
  }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_
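The re-registration trick in `teardownThread()` is worth calling out: setting a non-null value under the key from inside the destructor makes the runtime invoke it again on the next destructor iteration, so a TLS destructor can arrange to run last. A standalone sketch of the mechanism (illustrative only, not Scudo code):

#include <cstdint>
#include <cstdio>
#include <pthread.h>

static pthread_key_t Key;

static void Teardown(void *Ptr) {
  const intptr_t Remaining = reinterpret_cast<intptr_t>(Ptr);
  if (Remaining > 1) {
    // Re-registering a non-null value asks the runtime to call this
    // destructor again on the next iteration, postponing the real cleanup.
    if (pthread_setspecific(Key, reinterpret_cast<void *>(Remaining - 1)) == 0)
      return;
  }
  std::printf("final teardown\n"); // runs on the last iteration
}

static void *ThreadMain(void *) {
  // Start with a budget of iterations, as Scudo does with
  // PTHREAD_DESTRUCTOR_ITERATIONS.
  pthread_setspecific(Key, reinterpret_cast<void *>(4));
  return nullptr;
}

int main() {
  pthread_key_create(&Key, Teardown);
  pthread_t T;
  pthread_create(&T, nullptr, ThreadMain, nullptr);
  pthread_join(T, nullptr);
  return 0;
}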
252  Telegram/ThirdParty/scudo/tsd_shared.h  vendored
@@ -1,252 +0,0 @@
//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_SHARED_H_
#define SCUDO_TSD_SHARED_H_

#include "tsd.h"

#include "string_utils.h"

#if SCUDO_HAS_PLATFORM_TLS_SLOT
// This is a platform-provided header that needs to be on the include path when
// Scudo is compiled. It must declare a function with the prototype:
//   uintptr_t *getPlatformAllocatorTlsSlot()
// that returns the address of a thread-local word of storage reserved for
// Scudo, that must be zero-initialized in newly created threads.
#include "scudo_platform_tls_slot.h"
#endif

namespace scudo {

template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].init(Instance);
    const u32 NumberOfCPUs = getNumberOfCPUs();
    setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
                                        : Min(NumberOfCPUs, DefaultTSDCount));
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    for (u32 I = 0; I < TSDsArraySize; I++) {
      TSDs[I].commitBack(Instance);
      TSDs[I] = {};
    }
    setCurrentTSD(nullptr);
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    ScopedLock L(MutexTSDs);
    for (uptr I = 0; I < NumberOfTSDs; ++I) {
      TSDs[I].lock();
      Instance->drainCache(&TSDs[I]);
      TSDs[I].unlock();
    }
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
                                     UNUSED bool MinimalInit) {
    if (LIKELY(getCurrentTSD()))
      return;
    initThread(Instance);
  }

  // TSDs is an array of locks, which is not supported for marking the
  // thread-safety capability.
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    TSD<Allocator> *TSD = getCurrentTSD();
    DCHECK(TSD);
    *UnlockRequired = true;
    // Try to lock the currently associated context.
    if (TSD->tryLock())
      return TSD;
    // If that fails, go down the slow path.
    if (TSDsArraySize == 1U) {
      // Only 1 TSD, no need to go any further.
      // The compiler will optimize this one way or the other.
      TSD->lock();
      return TSD;
    }
    return getTSDAndLockSlow(TSD);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
      TSDs[I].unlock();
    Mutex.unlock();
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::MaxTSDsCount)
      return setNumberOfTSDs(static_cast<u32>(Value));
    if (O == Option::ThreadDisableMemInit)
      setDisableMemInit(Value);
    // Not supported by the TSD Registry, but not an error either.
    return true;
  }

  bool getDisableMemInit() const { return *getTlsPtr() & 1; }

  void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
    ScopedLock L(MutexTSDs);

    Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
                TSDsArraySize);
    for (uptr I = 0; I < NumberOfTSDs; ++I) {
      TSDs[I].lock();
      // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
      // thread annotations. However, given the TSD is only locked on this
      // shared path, we do the assertion in a separate path to avoid confusing
      // the analyzer.
      TSDs[I].assertLocked(/*BypassCheck=*/true);
      Str->append(" Shared TSD[%zu]:\n", I);
      TSDs[I].getCache().getStats(Str);
      TSDs[I].unlock();
    }
  }

private:
  ALWAYS_INLINE uptr *getTlsPtr() const {
#if SCUDO_HAS_PLATFORM_TLS_SLOT
    return reinterpret_cast<uptr *>(getPlatformAllocatorTlsSlot());
#else
    static thread_local uptr ThreadTSD;
    return &ThreadTSD;
#endif
  }

  static_assert(alignof(TSD<Allocator>) >= 2, "");

  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
    *getTlsPtr() &= 1;
    *getTlsPtr() |= reinterpret_cast<uptr>(CurrentTSD);
  }

  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
    return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
  }

  bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
    ScopedLock L(MutexTSDs);
    if (N < NumberOfTSDs)
      return false;
    if (N > TSDsArraySize)
      N = TSDsArraySize;
    NumberOfTSDs = N;
    NumberOfCoPrimes = 0;
    // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
    // array of TSDs in a random order. For details, see:
    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
    for (u32 I = 0; I < N; I++) {
      u32 A = I + 1;
      u32 B = N;
      // Find the GCD between I + 1 and N. If 1, they are coprimes.
      while (B != 0) {
        const u32 T = A;
        A = B;
        B = T % B;
      }
      if (A == 1)
        CoPrimes[NumberOfCoPrimes++] = I + 1;
    }
    return true;
  }

  void setDisableMemInit(bool B) {
    *getTlsPtr() &= ~1ULL;
    *getTlsPtr() |= B;
  }

  NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    initOnceMaybe(Instance);
    // Initial context assignment is done in a plain round-robin fashion.
    const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
    Instance->callPostInitCallback();
  }

  // TSDs is an array of locks, which is not supported for marking the
  // thread-safety capability.
  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
      EXCLUDES(MutexTSDs) {
    // Use the Precedence of the current TSD as our random seed. Since we are
    // in the slow path, it means that tryLock failed, and as a result it's
    // very likely that said Precedence is non-zero.
    const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
    u32 N, Inc;
    {
      ScopedLock L(MutexTSDs);
      N = NumberOfTSDs;
      DCHECK_NE(NumberOfCoPrimes, 0U);
      Inc = CoPrimes[R % NumberOfCoPrimes];
    }
    if (N > 1U) {
      u32 Index = R % N;
      uptr LowestPrecedence = UINTPTR_MAX;
      TSD<Allocator> *CandidateTSD = nullptr;
      // Go randomly through at most 4 contexts and find a candidate.
      for (u32 I = 0; I < Min(4U, N); I++) {
        if (TSDs[Index].tryLock()) {
          setCurrentTSD(&TSDs[Index]);
          return &TSDs[Index];
        }
        const uptr Precedence = TSDs[Index].getPrecedence();
        // A 0 precedence here means another thread just locked this TSD.
        if (Precedence && Precedence < LowestPrecedence) {
          CandidateTSD = &TSDs[Index];
          LowestPrecedence = Precedence;
        }
        Index += Inc;
        if (Index >= N)
          Index -= N;
      }
      if (CandidateTSD) {
        CandidateTSD->lock();
        setCurrentTSD(CandidateTSD);
        return CandidateTSD;
      }
    }
    // Last resort, stick with the current one.
    CurrentTSD->lock();
    return CurrentTSD;
  }

  atomic_u32 CurrentIndex = {};
  u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
  u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
  u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  HybridMutex Mutex;
  HybridMutex MutexTSDs;
  TSD<Allocator> TSDs[TSDsArraySize];
};

} // namespace scudo

#endif // SCUDO_TSD_SHARED_H_
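The coprime increment used by `getTSDAndLockSlow()` is what makes the partial scan sound: stepping through an array of size N by an increment coprime to N visits every index exactly once per N steps, so even the 4-element probe never examines the same TSD twice. A standalone demonstration (illustrative only, not Scudo code):

#include <cstdio>

// Walk all indices of an array of size N exactly once using an increment
// coprime to N (here N = 5, Inc = 3; gcd(5, 3) == 1).
int main() {
  const unsigned N = 5, Inc = 3;
  unsigned Index = 2; // arbitrary start, e.g. derived from a random seed
  for (unsigned I = 0; I < N; ++I) {
    std::printf("%u ", Index);
    Index += Inc;
    if (Index >= N)
      Index -= N;
  }
  std::printf("\n"); // prints each of 0..4 exactly once: "2 0 3 1 4"
  return 0;
}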
131  Telegram/ThirdParty/scudo/vector.h  vendored
@@ -1,131 +0,0 @@
//===-- vector.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_VECTOR_H_
#define SCUDO_VECTOR_H_

#include "mem_map.h"

#include <string.h>

namespace scudo {

// A low-level vector based on map. It stores the contents inline up to a fixed
// capacity, or in an external memory buffer if it grows bigger than that. May
// incur a significant memory overhead for small vectors. The current
// implementation supports only POD types.
//
// NOTE: This class is not meant to be used directly, use Vector<T> instead.
template <typename T> class VectorNoCtor {
public:
  T &operator[](uptr I) {
    DCHECK_LT(I, Size);
    return Data[I];
  }
  const T &operator[](uptr I) const {
    DCHECK_LT(I, Size);
    return Data[I];
  }
  void push_back(const T &Element) {
    DCHECK_LE(Size, capacity());
    if (Size == capacity()) {
      const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
      reallocate(NewCapacity);
    }
    memcpy(&Data[Size++], &Element, sizeof(T));
  }
  T &back() {
    DCHECK_GT(Size, 0);
    return Data[Size - 1];
  }
  void pop_back() {
    DCHECK_GT(Size, 0);
    Size--;
  }
  uptr size() const { return Size; }
  const T *data() const { return Data; }
  T *data() { return Data; }
  constexpr uptr capacity() const { return CapacityBytes / sizeof(T); }
  void reserve(uptr NewSize) {
    // Never downsize internal buffer.
    if (NewSize > capacity())
      reallocate(NewSize);
  }
  void resize(uptr NewSize) {
    if (NewSize > Size) {
      reserve(NewSize);
      memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
    }
    Size = NewSize;
  }

  void clear() { Size = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const { return data(); }
  T *begin() { return data(); }
  const T *end() const { return data() + size(); }
  T *end() { return data() + size(); }

protected:
  constexpr void init(uptr InitialCapacity = 0) {
    Data = &LocalData[0];
    CapacityBytes = sizeof(LocalData);
    if (InitialCapacity > capacity())
      reserve(InitialCapacity);
  }
  void destroy() {
    if (Data != &LocalData[0])
      ExternalBuffer.unmap(ExternalBuffer.getBase(),
                           ExternalBuffer.getCapacity());
  }

private:
  void reallocate(uptr NewCapacity) {
    DCHECK_GT(NewCapacity, 0);
    DCHECK_LE(Size, NewCapacity);

    MemMapT NewExternalBuffer;
    NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
    NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector");
    T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());

    memcpy(NewExternalData, Data, Size * sizeof(T));
    destroy();

    Data = NewExternalData;
    CapacityBytes = NewCapacity;
    ExternalBuffer = NewExternalBuffer;
  }

  T *Data = nullptr;
  uptr CapacityBytes = 0;
  uptr Size = 0;

  T LocalData[256 / sizeof(T)] = {};
  MemMapT ExternalBuffer;
};

template <typename T> class Vector : public VectorNoCtor<T> {
public:
  constexpr Vector() { VectorNoCtor<T>::init(); }
  explicit Vector(uptr Count) {
    VectorNoCtor<T>::init(Count);
    this->resize(Count);
  }
  ~Vector() { VectorNoCtor<T>::destroy(); }
  // Disallow copies and moves.
  Vector(const Vector &) = delete;
  Vector &operator=(const Vector &) = delete;
  Vector(Vector &&) = delete;
  Vector &operator=(Vector &&) = delete;
};

} // namespace scudo

#endif // SCUDO_VECTOR_H_
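The inline-then-external storage above is a small-buffer optimization: elements live in `LocalData` until the fixed capacity is exceeded, then move to a page mapping. A generic, standalone sketch of the same idea (illustrative only; it uses `malloc` where Scudo uses `MemMapT`, and error handling is omitted):

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

template <typename T> class SmallVec {
public:
  ~SmallVec() {
    if (Data != Local)
      std::free(Data);
  }
  void push_back(const T &E) {
    if (Size == Capacity) {
      // Out of inline space: move the contents to heap storage.
      const size_t NewCap = Capacity * 2;
      T *NewData = static_cast<T *>(std::malloc(NewCap * sizeof(T)));
      std::memcpy(NewData, Data, Size * sizeof(T));
      if (Data != Local)
        std::free(Data);
      Data = NewData;
      Capacity = NewCap;
    }
    Data[Size++] = E;
  }
  size_t size() const { return Size; }
  T &operator[](size_t I) { return Data[I]; }

private:
  static constexpr size_t InlineCount = 256 / sizeof(T);
  T Local[InlineCount] = {};
  T *Data = Local;
  size_t Size = 0;
  size_t Capacity = InlineCount;
};

int main() {
  SmallVec<int> V;
  for (int I = 0; I < 100; ++I)
    V.push_back(I); // 64 ints fit inline; growth moves to heap storage
  std::printf("size=%zu last=%d\n", V.size(), V[99]);
  return 0;
}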
40  Telegram/ThirdParty/scudo/wrappers_c.cpp  vendored
@@ -1,40 +0,0 @@
//===-- wrappers_c.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

// Skip this compilation unit if compiled as part of Bionic.
#if !SCUDO_ANDROID || !_BIONIC

#include "allocator_config.h"
#include "internal_defs.h"
#include "platform.h"
#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"

#include <stdint.h>
#include <stdio.h>

#define SCUDO_PREFIX(name) name
#define SCUDO_ALLOCATOR Allocator

// Export the static allocator so that the C++ wrappers can access it.
// Technically we could have a completely separated heap for C & C++ but in
// reality the amount of cross pollination between the two is staggering.
SCUDO_REQUIRE_CONSTANT_INITIALIZATION
scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;

#include "wrappers_c.inc"

#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX

extern "C" INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }

#endif // !SCUDO_ANDROID || !_BIONIC
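The `SCUDO_PREFIX`/`SCUDO_ALLOCATOR` macros let `wrappers_c.inc` define the same entry points under different names: with the empty prefix used here they come out as the standard allocator symbols, while other builds can substitute a prefixed name before including the same .inc body. A standalone sketch of the pattern (hypothetical function name, not the real wrappers_c.inc contents):

#include <cstdio>

// The .inc trick in miniature: the same definition is compiled under a name
// chosen by whoever defines SCUDO_PREFIX before inclusion.
#define SCUDO_PREFIX(name) scudo_##name
extern "C" void SCUDO_PREFIX(print_hello)(void) { // becomes scudo_print_hello
  std::puts("hello from the prefixed wrapper");
}
#undef SCUDO_PREFIX

int main() {
  scudo_print_hello();
  return 0;
}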
Some files were not shown because too many files have changed in this diff.