Update scudo to 18.1.1
parent 07ab875655, commit 846a6d8717
65 changed files with 2424 additions and 1395 deletions
Telegram/ThirdParty/scudo/CMakeLists.txt (vendored): 13 changes
|
@ -25,7 +25,7 @@ append_list_if(COMPILER_RT_HAS_WNO_PEDANTIC -Wno-pedantic SCUDO_CFLAGS)
|
|||
append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SCUDO_CFLAGS)
|
||||
|
||||
if(COMPILER_RT_DEBUG)
|
||||
list(APPEND SCUDO_CFLAGS -O0 -DSCUDO_DEBUG=1)
|
||||
list(APPEND SCUDO_CFLAGS -O0 -DSCUDO_DEBUG=1 -DSCUDO_ENABLE_HOOKS=1)
|
||||
else()
|
||||
list(APPEND SCUDO_CFLAGS -O3)
|
||||
endif()
|
||||
|
@ -56,11 +56,15 @@ if(ANDROID)
|
|||
endif()
|
||||
|
||||
set(SCUDO_HEADERS
|
||||
allocator_common.h
|
||||
allocator_config.h
|
||||
atomic_helpers.h
|
||||
bytemap.h
|
||||
checksum.h
|
||||
chunk.h
|
||||
condition_variable.h
|
||||
condition_variable_base.h
|
||||
condition_variable_linux.h
|
||||
combined.h
|
||||
common.h
|
||||
flags_parser.h
|
||||
|
@ -74,6 +78,7 @@ set(SCUDO_HEADERS
|
|||
mem_map.h
|
||||
mem_map_base.h
|
||||
mem_map_fuchsia.h
|
||||
mem_map_linux.h
|
||||
mutex.h
|
||||
options.h
|
||||
platform.h
|
||||
|
@ -82,7 +87,7 @@ set(SCUDO_HEADERS
|
|||
quarantine.h
|
||||
release.h
|
||||
report.h
|
||||
rss_limit_checker.h
|
||||
report_linux.h
|
||||
secondary.h
|
||||
size_class_map.h
|
||||
stack_depot.h
|
||||
|
@ -102,6 +107,7 @@ set(SCUDO_HEADERS
|
|||
set(SCUDO_SOURCES
|
||||
checksum.cpp
|
||||
common.cpp
|
||||
condition_variable_linux.cpp
|
||||
crc32_hw.cpp
|
||||
flags_parser.cpp
|
||||
flags.cpp
|
||||
|
@ -109,9 +115,10 @@ set(SCUDO_SOURCES
|
|||
linux.cpp
|
||||
mem_map.cpp
|
||||
mem_map_fuchsia.cpp
|
||||
mem_map_linux.cpp
|
||||
release.cpp
|
||||
report.cpp
|
||||
rss_limit_checker.cpp
|
||||
report_linux.cpp
|
||||
string_utils.cpp
|
||||
timing.cpp
|
||||
)
|
||||
|
|
Telegram/ThirdParty/scudo/allocator_common.h (vendored, new file): 85 additions
|
@ -0,0 +1,85 @@
|
|||
//===-- allocator_common.h --------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_ALLOCATOR_COMMON_H_
|
||||
#define SCUDO_ALLOCATOR_COMMON_H_
|
||||
|
||||
#include "common.h"
|
||||
#include "list.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
template <class SizeClassAllocator> struct TransferBatch {
|
||||
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
|
||||
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
|
||||
|
||||
static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
|
||||
void setFromArray(CompactPtrT *Array, u16 N) {
|
||||
DCHECK_LE(N, MaxNumCached);
|
||||
Count = N;
|
||||
memcpy(Batch, Array, sizeof(Batch[0]) * Count);
|
||||
}
|
||||
void appendFromArray(CompactPtrT *Array, u16 N) {
|
||||
DCHECK_LE(N, MaxNumCached - Count);
|
||||
memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
|
||||
// u16 will be promoted to int by arithmetic type conversion.
|
||||
Count = static_cast<u16>(Count + N);
|
||||
}
|
||||
void appendFromTransferBatch(TransferBatch *B, u16 N) {
|
||||
DCHECK_LE(N, MaxNumCached - Count);
|
||||
DCHECK_GE(B->Count, N);
|
||||
// Append from the back of `B`.
|
||||
memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
|
||||
// u16 will be promoted to int by arithmetic type conversion.
|
||||
Count = static_cast<u16>(Count + N);
|
||||
B->Count = static_cast<u16>(B->Count - N);
|
||||
}
|
||||
void clear() { Count = 0; }
|
||||
void add(CompactPtrT P) {
|
||||
DCHECK_LT(Count, MaxNumCached);
|
||||
Batch[Count++] = P;
|
||||
}
|
||||
void moveToArray(CompactPtrT *Array) {
|
||||
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
|
||||
clear();
|
||||
}
|
||||
u16 getCount() const { return Count; }
|
||||
bool isEmpty() const { return Count == 0U; }
|
||||
CompactPtrT get(u16 I) const {
|
||||
DCHECK_LE(I, Count);
|
||||
return Batch[I];
|
||||
}
|
||||
TransferBatch *Next;
|
||||
|
||||
private:
|
||||
CompactPtrT Batch[MaxNumCached];
|
||||
u16 Count;
|
||||
};
|
||||
|
||||
// A BatchGroup is used to collect blocks. Each group has a group id to
|
||||
// identify the group kind of contained blocks.
|
||||
template <class SizeClassAllocator> struct BatchGroup {
|
||||
// `Next` is used by IntrusiveList.
|
||||
BatchGroup *Next;
|
||||
// The compact base address of each group
|
||||
uptr CompactPtrGroupBase;
|
||||
// Cache value of SizeClassAllocatorLocalCache::getMaxCached()
|
||||
u16 MaxCachedPerBatch;
|
||||
// Number of blocks pushed into this group. This is an increment-only
|
||||
// counter.
|
||||
uptr PushedBlocks;
|
||||
// This is used to track how many bytes are not in-use since last time we
|
||||
// tried to release pages.
|
||||
uptr BytesInBGAtLastCheckpoint;
|
||||
// Blocks are managed by TransferBatch in a list.
|
||||
SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
|
||||
};
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_ALLOCATOR_COMMON_H_
|
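For reference, a minimal usage sketch of the TransferBatch template that this new header factors out of local_cache.h; everything named Fake* below is a hypothetical stand-in, only the TransferBatch API itself comes from the hunk above.

#include "allocator_common.h"

// Hypothetical stand-in for a real SizeClassAllocator: TransferBatch only
// needs the nested SizeClassMap and the CompactPtrT typedef.
struct FakeAllocator {
  struct SizeClassMap { static const scudo::u16 MaxNumCachedHint = 8; };
  using CompactPtrT = scudo::u32;
};

void transferBatchSketch() {
  scudo::TransferBatch<FakeAllocator> Batch;
  Batch.clear();                        // Count = 0
  scudo::u32 Blocks[4] = {1, 2, 3, 4};  // made-up compact pointers
  Batch.setFromArray(Blocks, 4);        // bulk-fill from an array
  Batch.add(5);                         // push one more entry (Count == 5)
  scudo::u32 Out[8];
  Batch.moveToArray(Out);               // drain into Out, then clear()
}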
Telegram/ThirdParty/scudo/allocator_config.h (vendored): 53 changes
|
@ -11,6 +11,7 @@
|
|||
|
||||
#include "combined.h"
|
||||
#include "common.h"
|
||||
#include "condition_variable.h"
|
||||
#include "flags.h"
|
||||
#include "primary32.h"
|
||||
#include "primary64.h"
|
||||
|
@ -82,6 +83,14 @@ namespace scudo {
|
|||
// // Defines the minimal & maximal release interval that can be set.
|
||||
// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
|
||||
// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
|
||||
//
|
||||
// // Use condition variable to shorten the waiting time of refillment of
|
||||
// // freelist. Note that this depends on the implementation of condition
|
||||
// // variable on each platform and the performance may vary so that it
|
||||
// // doesn't guarantee a performance benefit.
|
||||
// // Note that both variables have to be defined to enable it.
|
||||
// static const bool UseConditionVariable = true;
|
||||
// using ConditionVariableT = ConditionVariableLinux;
|
||||
// };
|
||||
// // Defines the type of Primary allocator to use.
|
||||
// template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
|
||||
|
@ -195,50 +204,6 @@ struct AndroidConfig {
|
|||
template <typename Config> using SecondaryT = MapAllocator<Config>;
|
||||
};
|
||||
|
||||
struct AndroidSvelteConfig {
|
||||
static const bool MaySupportMemoryTagging = false;
|
||||
template <class A>
|
||||
using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
|
||||
|
||||
struct Primary {
|
||||
using SizeClassMap = SvelteSizeClassMap;
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
static const uptr RegionSizeLog = 27U;
|
||||
typedef u32 CompactPtrT;
|
||||
static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
|
||||
static const uptr GroupSizeLog = 18U;
|
||||
static const bool EnableRandomOffset = true;
|
||||
static const uptr MapSizeIncrement = 1UL << 18;
|
||||
#else
|
||||
static const uptr RegionSizeLog = 16U;
|
||||
static const uptr GroupSizeLog = 16U;
|
||||
typedef uptr CompactPtrT;
|
||||
#endif
|
||||
static const s32 MinReleaseToOsIntervalMs = 1000;
|
||||
static const s32 MaxReleaseToOsIntervalMs = 1000;
|
||||
};
|
||||
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
|
||||
#else
|
||||
template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
|
||||
#endif
|
||||
|
||||
struct Secondary {
|
||||
struct Cache {
|
||||
static const u32 EntriesArraySize = 16U;
|
||||
static const u32 QuarantineSize = 32U;
|
||||
static const u32 DefaultMaxEntriesCount = 4U;
|
||||
static const uptr DefaultMaxEntrySize = 1UL << 18;
|
||||
static const s32 MinReleaseToOsIntervalMs = 0;
|
||||
static const s32 MaxReleaseToOsIntervalMs = 0;
|
||||
};
|
||||
template <typename Config> using CacheT = MapAllocatorCache<Config>;
|
||||
};
|
||||
|
||||
template <typename Config> using SecondaryT = MapAllocator<Config>;
|
||||
};
|
||||
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
struct FuchsiaConfig {
|
||||
static const bool MaySupportMemoryTagging = false;
|
||||
|
|
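A hedged illustration of the two new knobs documented in the comment block above; the enclosing MyConfig and its elided fields are placeholders, only the two condition-variable members come from this commit.

struct MyConfig {
  // ...MaySupportMemoryTagging, TSDRegistryT and other required fields elided...
  struct Primary {
    // ...SizeClassMap, region/group sizes, release intervals elided...
    // Opt in to the condition-variable-based wait for freelist refills.
    // Both members must be defined, otherwise ConditionVariableState falls
    // back to the spinning ConditionVariableDummy.
    static const bool UseConditionVariable = true;
    using ConditionVariableT = scudo::ConditionVariableLinux;
  };
  // ...Secondary config and the PrimaryT/SecondaryT aliases elided...
};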
Telegram/ThirdParty/scudo/atomic_helpers.h (vendored): 8 changes
|
@ -133,10 +133,10 @@ inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
|
|||
}
|
||||
|
||||
template <typename T>
|
||||
inline typename T::Type atomic_compare_exchange(volatile T *A,
|
||||
typename T::Type Cmp,
|
||||
typename T::Type Xchg) {
|
||||
atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
|
||||
inline typename T::Type
|
||||
atomic_compare_exchange_strong(volatile T *A, typename T::Type Cmp,
|
||||
typename T::Type Xchg, memory_order MO) {
|
||||
atomic_compare_exchange_strong(A, &Cmp, Xchg, MO);
|
||||
return Cmp;
|
||||
}
|
||||
|
||||
|
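The wrapper that hard-coded memory_order_acquire is gone; callers now spell the ordering out, as the HybridMutex changes in linux.cpp further down do. A small sketch, assuming the scudo headers are on the include path:

bool tryAcquireSketch(scudo::atomic_u32 *M) {
  // Value-returning CAS: the observed value is returned, so the exchange
  // succeeded iff that value equals Cmp.
  return scudo::atomic_compare_exchange_strong(
             M, /*Cmp=*/0U, /*Xchg=*/1U, scudo::memory_order_acquire) == 0U;
}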
|
|
@ -52,8 +52,6 @@ static const size_t MaxSize = 128 * 1024;
|
|||
// cleanly.
|
||||
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::AndroidConfig)
|
||||
->Range(MinSize, MaxSize);
|
||||
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::AndroidSvelteConfig)
|
||||
->Range(MinSize, MaxSize);
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::FuchsiaConfig)
|
||||
->Range(MinSize, MaxSize);
|
||||
|
@ -99,8 +97,6 @@ static const size_t MaxIters = 32 * 1024;
|
|||
// cleanly.
|
||||
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::AndroidConfig)
|
||||
->Range(MinIters, MaxIters);
|
||||
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::AndroidSvelteConfig)
|
||||
->Range(MinIters, MaxIters);
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::FuchsiaConfig)
|
||||
->Range(MinIters, MaxIters);
|
||||
|
|
Telegram/ThirdParty/scudo/chunk.h (vendored): 13 changes
|
@ -128,19 +128,6 @@ inline void loadHeader(u32 Cookie, const void *Ptr,
|
|||
reportHeaderCorruption(const_cast<void *>(Ptr));
|
||||
}
|
||||
|
||||
inline void compareExchangeHeader(u32 Cookie, void *Ptr,
|
||||
UnpackedHeader *NewUnpackedHeader,
|
||||
UnpackedHeader *OldUnpackedHeader) {
|
||||
NewUnpackedHeader->Checksum =
|
||||
computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
|
||||
PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
|
||||
PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
|
||||
if (UNLIKELY(!atomic_compare_exchange_strong(
|
||||
getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
|
||||
memory_order_relaxed)))
|
||||
reportHeaderRace(Ptr);
|
||||
}
|
||||
|
||||
inline bool isValid(u32 Cookie, const void *Ptr,
|
||||
UnpackedHeader *NewUnpackedHeader) {
|
||||
PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
|
||||
|
|
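With compareExchangeHeader() removed, callers update the loaded header in place and write it back with storeHeader(), as the combined.h hunks below show. A sketch of the pattern, where Cookie and Ptr are whatever the caller already has:

void markAvailableSketch(scudo::u32 Cookie, void *Ptr) {
  scudo::Chunk::UnpackedHeader Header;
  scudo::Chunk::loadHeader(Cookie, Ptr, &Header);   // checksum-validated read
  Header.State = scudo::Chunk::State::Available;    // mutate the local copy
  scudo::Chunk::storeHeader(Cookie, Ptr, &Header);  // re-checksum and store
}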
Telegram/ThirdParty/scudo/combined.h (vendored): 230 changes
|
@ -14,11 +14,11 @@
|
|||
#include "flags.h"
|
||||
#include "flags_parser.h"
|
||||
#include "local_cache.h"
|
||||
#include "mem_map.h"
|
||||
#include "memtag.h"
|
||||
#include "options.h"
|
||||
#include "quarantine.h"
|
||||
#include "report.h"
|
||||
#include "rss_limit_checker.h"
|
||||
#include "secondary.h"
|
||||
#include "stack_depot.h"
|
||||
#include "string_utils.h"
|
||||
|
@ -68,14 +68,13 @@ public:
|
|||
if (UNLIKELY(Header.State != Chunk::State::Quarantined))
|
||||
reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
|
||||
|
||||
Chunk::UnpackedHeader NewHeader = Header;
|
||||
NewHeader.State = Chunk::State::Available;
|
||||
Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
|
||||
Header.State = Chunk::State::Available;
|
||||
Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
|
||||
|
||||
if (allocatorSupportsMemoryTagging<Config>())
|
||||
Ptr = untagPointer(Ptr);
|
||||
void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
|
||||
Cache.deallocate(NewHeader.ClassId, BlockBegin);
|
||||
void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
|
||||
Cache.deallocate(Header.ClassId, BlockBegin);
|
||||
}
|
||||
|
||||
// We take a shortcut when allocating a quarantine batch by working with the
|
||||
|
@ -118,9 +117,8 @@ public:
|
|||
DCHECK_EQ(Header.Offset, 0);
|
||||
DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
|
||||
|
||||
Chunk::UnpackedHeader NewHeader = Header;
|
||||
NewHeader.State = Chunk::State::Available;
|
||||
Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
|
||||
Header.State = Chunk::State::Available;
|
||||
Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
|
||||
Cache.deallocate(QuarantineClassId,
|
||||
reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
|
||||
Chunk::getHeaderSize()));
|
||||
|
@ -149,9 +147,6 @@ public:
|
|||
initFlags();
|
||||
reportUnrecognizedFlags();
|
||||
|
||||
RssChecker.init(scudo::getFlags()->soft_rss_limit_mb,
|
||||
scudo::getFlags()->hard_rss_limit_mb);
|
||||
|
||||
// Store some flags locally.
|
||||
if (getFlags()->may_return_null)
|
||||
Primary.Options.set(OptionBit::MayReturnNull);
|
||||
|
@ -251,12 +246,14 @@ public:
|
|||
// - unlinking the local stats from the global ones (destroying the cache does
|
||||
// the last two items).
|
||||
void commitBack(TSD<ThisT> *TSD) {
|
||||
TSD->assertLocked(/*BypassCheck=*/true);
|
||||
Quarantine.drain(&TSD->getQuarantineCache(),
|
||||
QuarantineCallback(*this, TSD->getCache()));
|
||||
TSD->getCache().destroy(&Stats);
|
||||
}
|
||||
|
||||
void drainCache(TSD<ThisT> *TSD) {
|
||||
TSD->assertLocked(/*BypassCheck=*/true);
|
||||
Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
|
||||
QuarantineCallback(*this, TSD->getCache()));
|
||||
TSD->getCache().drain();
|
||||
|
@ -299,7 +296,7 @@ public:
|
|||
#endif
|
||||
}
|
||||
|
||||
uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
|
||||
uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
|
||||
uptr ClassId) {
|
||||
if (!Options.get(OptionBit::UseOddEvenTags))
|
||||
return 0;
|
||||
|
@ -329,8 +326,6 @@ public:
|
|||
#ifdef GWP_ASAN_HOOKS
|
||||
if (UNLIKELY(GuardedAlloc.shouldSample())) {
|
||||
if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
|
||||
if (UNLIKELY(&__scudo_allocate_hook))
|
||||
__scudo_allocate_hook(Ptr, Size);
|
||||
Stats.lock();
|
||||
Stats.add(StatAllocated, GuardedAllocSlotSize);
|
||||
Stats.sub(StatFree, GuardedAllocSlotSize);
|
||||
|
@ -363,19 +358,6 @@ public:
|
|||
}
|
||||
DCHECK_LE(Size, NeededSize);
|
||||
|
||||
switch (RssChecker.getRssLimitExceeded()) {
|
||||
case RssLimitChecker::Neither:
|
||||
break;
|
||||
case RssLimitChecker::Soft:
|
||||
if (Options.get(OptionBit::MayReturnNull))
|
||||
return nullptr;
|
||||
reportSoftRSSLimit(RssChecker.getSoftRssLimit());
|
||||
break;
|
||||
case RssLimitChecker::Hard:
|
||||
reportHardRSSLimit(RssChecker.getHardRssLimit());
|
||||
break;
|
||||
}
|
||||
|
||||
void *Block = nullptr;
|
||||
uptr ClassId = 0;
|
||||
uptr SecondaryBlockEnd = 0;
|
||||
|
@ -384,11 +366,11 @@ public:
|
|||
DCHECK_NE(ClassId, 0U);
|
||||
bool UnlockRequired;
|
||||
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
Block = TSD->getCache().allocate(ClassId);
|
||||
// If the allocation failed, the most likely reason with a 32-bit primary
|
||||
// is the region being full. In that event, retry in each successively
|
||||
// larger class until it fits. If it fails to fit in the largest class,
|
||||
// fallback to the Secondary.
|
||||
// If the allocation failed, retry in each successively larger class until
|
||||
// it fits. If it fails to fit in the largest class, fallback to the
|
||||
// Secondary.
|
||||
if (UNLIKELY(!Block)) {
|
||||
while (ClassId < SizeClassMap::LargestClassId && !Block)
|
||||
Block = TSD->getCache().allocate(++ClassId);
|
||||
|
@ -406,6 +388,7 @@ public:
|
|||
if (UNLIKELY(!Block)) {
|
||||
if (Options.get(OptionBit::MayReturnNull))
|
||||
return nullptr;
|
||||
printStats();
|
||||
reportOutOfMemory(NeededSize);
|
||||
}
|
||||
|
||||
|
@ -535,14 +518,14 @@ public:
|
|||
Chunk::SizeOrUnusedBytesMask;
|
||||
Chunk::storeHeader(Cookie, Ptr, &Header);
|
||||
|
||||
if (UNLIKELY(&__scudo_allocate_hook))
|
||||
__scudo_allocate_hook(TaggedPtr, Size);
|
||||
|
||||
return TaggedPtr;
|
||||
}
|
||||
|
||||
NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
|
||||
UNUSED uptr Alignment = MinAlignment) {
|
||||
if (UNLIKELY(!Ptr))
|
||||
return;
|
||||
|
||||
// For a deallocation, we only ensure minimal initialization, meaning thread
|
||||
// local data will be left uninitialized for now (when using ELF TLS). The
|
||||
// fallback cache will be used instead. This is a workaround for a situation
|
||||
|
@ -551,12 +534,6 @@ public:
|
|||
// being destroyed properly. Any other heap operation will do a full init.
|
||||
initThreadMaybe(/*MinimalInit=*/true);
|
||||
|
||||
if (UNLIKELY(&__scudo_deallocate_hook))
|
||||
__scudo_deallocate_hook(Ptr);
|
||||
|
||||
if (UNLIKELY(!Ptr))
|
||||
return;
|
||||
|
||||
#ifdef GWP_ASAN_HOOKS
|
||||
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
|
||||
GuardedAlloc.deallocate(Ptr);
|
||||
|
@ -635,47 +612,46 @@ public:
|
|||
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
|
||||
reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
|
||||
|
||||
Chunk::UnpackedHeader OldHeader;
|
||||
Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
|
||||
Chunk::UnpackedHeader Header;
|
||||
Chunk::loadHeader(Cookie, OldPtr, &Header);
|
||||
|
||||
if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
|
||||
if (UNLIKELY(Header.State != Chunk::State::Allocated))
|
||||
reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
|
||||
|
||||
// Pointer has to be allocated with a malloc-type function. Some
|
||||
// applications think that it is OK to realloc a memalign'ed pointer, which
|
||||
// will trigger this check. It really isn't.
|
||||
if (Options.get(OptionBit::DeallocTypeMismatch)) {
|
||||
if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
|
||||
if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
|
||||
reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
|
||||
OldHeader.OriginOrWasZeroed,
|
||||
Header.OriginOrWasZeroed,
|
||||
Chunk::Origin::Malloc);
|
||||
}
|
||||
|
||||
void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
|
||||
void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
|
||||
uptr BlockEnd;
|
||||
uptr OldSize;
|
||||
const uptr ClassId = OldHeader.ClassId;
|
||||
const uptr ClassId = Header.ClassId;
|
||||
if (LIKELY(ClassId)) {
|
||||
BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
|
||||
SizeClassMap::getSizeByClassId(ClassId);
|
||||
OldSize = OldHeader.SizeOrUnusedBytes;
|
||||
OldSize = Header.SizeOrUnusedBytes;
|
||||
} else {
|
||||
BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
|
||||
OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
|
||||
OldHeader.SizeOrUnusedBytes);
|
||||
Header.SizeOrUnusedBytes);
|
||||
}
|
||||
// If the new chunk still fits in the previously allocated block (with a
|
||||
// reasonable delta), we just keep the old block, and update the chunk
|
||||
// header to reflect the size change.
|
||||
if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
|
||||
if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
|
||||
Chunk::UnpackedHeader NewHeader = OldHeader;
|
||||
NewHeader.SizeOrUnusedBytes =
|
||||
Header.SizeOrUnusedBytes =
|
||||
(ClassId ? NewSize
|
||||
: BlockEnd -
|
||||
(reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
|
||||
Chunk::SizeOrUnusedBytesMask;
|
||||
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
|
||||
Chunk::storeHeader(Cookie, OldPtr, &Header);
|
||||
if (UNLIKELY(useMemoryTagging<Config>(Options))) {
|
||||
if (ClassId) {
|
||||
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
|
||||
|
@ -697,9 +673,7 @@ public:
|
|||
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
|
||||
if (LIKELY(NewPtr)) {
|
||||
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
|
||||
if (UNLIKELY(&__scudo_deallocate_hook))
|
||||
__scudo_deallocate_hook(OldTaggedPtr);
|
||||
quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
|
||||
quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
|
||||
}
|
||||
return NewPtr;
|
||||
}
|
||||
|
@ -754,6 +728,13 @@ public:
|
|||
Str.output();
|
||||
}
|
||||
|
||||
void printFragmentationInfo() {
|
||||
ScopedString Str;
|
||||
Primary.getFragmentationInfo(&Str);
|
||||
// Secondary allocator dumps the fragmentation data in getStats().
|
||||
Str.output();
|
||||
}
|
||||
|
||||
void releaseToOS(ReleaseToOS ReleaseType) {
|
||||
initThreadMaybe();
|
||||
if (ReleaseType == ReleaseToOS::ForceAll)
|
||||
|
@ -847,10 +828,15 @@ public:
|
|||
// for it, which then forces realloc to copy the usable size of a chunk as
|
||||
// opposed to its actual size.
|
||||
uptr getUsableSize(const void *Ptr) {
|
||||
initThreadMaybe();
|
||||
if (UNLIKELY(!Ptr))
|
||||
return 0;
|
||||
|
||||
return getAllocSize(Ptr);
|
||||
}
|
||||
|
||||
uptr getAllocSize(const void *Ptr) {
|
||||
initThreadMaybe();
|
||||
|
||||
#ifdef GWP_ASAN_HOOKS
|
||||
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
|
||||
return GuardedAlloc.getSize(Ptr);
|
||||
|
@ -859,9 +845,11 @@ public:
|
|||
Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
|
||||
Chunk::UnpackedHeader Header;
|
||||
Chunk::loadHeader(Cookie, Ptr, &Header);
|
||||
// Getting the usable size of a chunk only makes sense if it's allocated.
|
||||
|
||||
// Getting the alloc size of a chunk only makes sense if it's allocated.
|
||||
if (UNLIKELY(Header.State != Chunk::State::Allocated))
|
||||
reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
|
||||
|
||||
return getSize(Ptr, &Header);
|
||||
}
|
||||
|
||||
|
@ -887,13 +875,6 @@ public:
|
|||
Header.State == Chunk::State::Allocated;
|
||||
}
|
||||
|
||||
void setRssLimitsTestOnly(int SoftRssLimitMb, int HardRssLimitMb,
|
||||
bool MayReturnNull) {
|
||||
RssChecker.init(SoftRssLimitMb, HardRssLimitMb);
|
||||
if (MayReturnNull)
|
||||
Primary.Options.set(OptionBit::MayReturnNull);
|
||||
}
|
||||
|
||||
bool useMemoryTaggingTestOnly() const {
|
||||
return useMemoryTagging<Config>(Primary.Options.load());
|
||||
}
|
||||
|
@ -913,7 +894,7 @@ public:
|
|||
|
||||
void setTrackAllocationStacks(bool Track) {
|
||||
initThreadMaybe();
|
||||
if (getFlags()->allocation_ring_buffer_size == 0) {
|
||||
if (getFlags()->allocation_ring_buffer_size <= 0) {
|
||||
DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
|
||||
return;
|
||||
}
|
||||
|
@ -955,21 +936,7 @@ public:
|
|||
|
||||
uptr getRingBufferSize() {
|
||||
initThreadMaybe();
|
||||
auto *RingBuffer = getRingBuffer();
|
||||
return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
|
||||
}
|
||||
|
||||
static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
|
||||
// Need at least one entry.
|
||||
if (Size < sizeof(AllocationRingBuffer) +
|
||||
sizeof(typename AllocationRingBuffer::Entry)) {
|
||||
return false;
|
||||
}
|
||||
AllocationRingBuffer *RingBuffer =
|
||||
reinterpret_cast<AllocationRingBuffer *>(Buffer);
|
||||
RingBuffer->Size = (Size - sizeof(AllocationRingBuffer)) /
|
||||
sizeof(typename AllocationRingBuffer::Entry);
|
||||
return true;
|
||||
return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
|
||||
}
|
||||
|
||||
static const uptr MaxTraceSize = 64;
|
||||
|
@ -986,8 +953,9 @@ public:
|
|||
static void getErrorInfo(struct scudo_error_info *ErrorInfo,
|
||||
uintptr_t FaultAddr, const char *DepotPtr,
|
||||
const char *RegionInfoPtr, const char *RingBufferPtr,
|
||||
const char *Memory, const char *MemoryTags,
|
||||
uintptr_t MemoryAddr, size_t MemorySize) {
|
||||
size_t RingBufferSize, const char *Memory,
|
||||
const char *MemoryTags, uintptr_t MemoryAddr,
|
||||
size_t MemorySize) {
|
||||
*ErrorInfo = {};
|
||||
if (!allocatorSupportsMemoryTagging<Config>() ||
|
||||
MemoryAddr + MemorySize < MemoryAddr)
|
||||
|
@ -1006,7 +974,7 @@ public:
|
|||
// Check the ring buffer. For primary allocations this will only find UAF;
|
||||
// for secondary allocations we can find either UAF or OOB.
|
||||
getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
|
||||
RingBufferPtr);
|
||||
RingBufferPtr, RingBufferSize);
|
||||
|
||||
// Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
|
||||
// Beyond that we are likely to hit false positives.
|
||||
|
@ -1053,7 +1021,6 @@ private:
|
|||
QuarantineT Quarantine;
|
||||
TSDRegistryT TSDRegistry;
|
||||
pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
|
||||
RssLimitChecker RssChecker;
|
||||
|
||||
#ifdef GWP_ASAN_HOOKS
|
||||
gwp_asan::GuardedPoolAllocator GuardedAlloc;
|
||||
|
@ -1073,13 +1040,14 @@ private:
|
|||
};
|
||||
|
||||
atomic_uptr Pos;
|
||||
u32 Size;
|
||||
// An array of Size (at least one) elements of type Entry is immediately
|
||||
// following to this struct.
|
||||
};
|
||||
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
|
||||
// and immediately followed by Size elements of type Entry.
|
||||
char *RawRingBuffer = {};
|
||||
u32 RingBufferElements = 0;
|
||||
MemMapT RawRingBufferMap;
|
||||
|
||||
// The following might get optimized out by the compiler.
|
||||
NOINLINE void performSanityChecks() {
|
||||
|
@ -1134,35 +1102,34 @@ private:
|
|||
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
|
||||
}
|
||||
|
||||
void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
|
||||
void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
|
||||
Chunk::UnpackedHeader *Header,
|
||||
uptr Size) NO_THREAD_SAFETY_ANALYSIS {
|
||||
void *Ptr = getHeaderTaggedPointer(TaggedPtr);
|
||||
Chunk::UnpackedHeader NewHeader = *Header;
|
||||
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
|
||||
// than the maximum allowed, we return a chunk directly to the backend.
|
||||
// This purposefully underflows for Size == 0.
|
||||
const bool BypassQuarantine = !Quarantine.getCacheSize() ||
|
||||
((Size - 1) >= QuarantineMaxChunkSize) ||
|
||||
!NewHeader.ClassId;
|
||||
!Header->ClassId;
|
||||
if (BypassQuarantine)
|
||||
NewHeader.State = Chunk::State::Available;
|
||||
Header->State = Chunk::State::Available;
|
||||
else
|
||||
NewHeader.State = Chunk::State::Quarantined;
|
||||
NewHeader.OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
|
||||
NewHeader.ClassId &&
|
||||
!TSDRegistry.getDisableMemInit();
|
||||
Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
|
||||
Header->State = Chunk::State::Quarantined;
|
||||
Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
|
||||
Header->ClassId &&
|
||||
!TSDRegistry.getDisableMemInit();
|
||||
Chunk::storeHeader(Cookie, Ptr, Header);
|
||||
|
||||
if (UNLIKELY(useMemoryTagging<Config>(Options))) {
|
||||
u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
|
||||
storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
|
||||
if (NewHeader.ClassId) {
|
||||
if (Header->ClassId) {
|
||||
if (!TSDRegistry.getDisableMemInit()) {
|
||||
uptr TaggedBegin, TaggedEnd;
|
||||
const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
|
||||
Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
|
||||
NewHeader.ClassId);
|
||||
Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
|
||||
Header->ClassId);
|
||||
// Exclude the previous tag so that immediate use after free is
|
||||
// detected 100% of the time.
|
||||
setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
|
||||
|
@ -1173,11 +1140,12 @@ private:
|
|||
if (BypassQuarantine) {
|
||||
if (allocatorSupportsMemoryTagging<Config>())
|
||||
Ptr = untagPointer(Ptr);
|
||||
void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
|
||||
const uptr ClassId = NewHeader.ClassId;
|
||||
void *BlockBegin = getBlockBegin(Ptr, Header);
|
||||
const uptr ClassId = Header->ClassId;
|
||||
if (LIKELY(ClassId)) {
|
||||
bool UnlockRequired;
|
||||
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
const bool CacheDrained =
|
||||
TSD->getCache().deallocate(ClassId, BlockBegin);
|
||||
if (UnlockRequired)
|
||||
|
@ -1197,6 +1165,7 @@ private:
|
|||
} else {
|
||||
bool UnlockRequired;
|
||||
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
Quarantine.put(&TSD->getQuarantineCache(),
|
||||
QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
|
||||
if (UnlockRequired)
|
||||
|
@ -1273,7 +1242,7 @@ private:
|
|||
storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
|
||||
}
|
||||
|
||||
void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
|
||||
void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
|
||||
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
|
||||
return;
|
||||
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
|
||||
|
@ -1286,7 +1255,7 @@ private:
|
|||
u32 DeallocationTid) {
|
||||
uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
|
||||
typename AllocationRingBuffer::Entry *Entry =
|
||||
getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
|
||||
getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
|
||||
|
||||
// First invalidate our entry so that we don't attempt to interpret a
|
||||
// partially written state in getSecondaryErrorInfo(). The fences below
|
||||
|
@ -1305,7 +1274,7 @@ private:
|
|||
atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
|
||||
}
|
||||
|
||||
void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
|
||||
void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
|
||||
uptr Size) {
|
||||
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
|
||||
return;
|
||||
|
@ -1320,8 +1289,8 @@ private:
|
|||
storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
|
||||
}
|
||||
|
||||
void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
|
||||
uptr Size) {
|
||||
void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
|
||||
u8 PrevTag, uptr Size) {
|
||||
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
|
||||
return;
|
||||
|
||||
|
@ -1427,17 +1396,19 @@ private:
|
|||
size_t &NextErrorReport,
|
||||
uintptr_t FaultAddr,
|
||||
const StackDepot *Depot,
|
||||
const char *RingBufferPtr) {
|
||||
const char *RingBufferPtr,
|
||||
size_t RingBufferSize) {
|
||||
auto *RingBuffer =
|
||||
reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
|
||||
if (!RingBuffer || RingBuffer->Size == 0)
|
||||
size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
|
||||
if (!RingBuffer || RingBufferElements == 0)
|
||||
return;
|
||||
uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
|
||||
|
||||
for (uptr I = Pos - 1;
|
||||
I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
|
||||
for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
|
||||
NextErrorReport != NumErrorReports;
|
||||
--I) {
|
||||
auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
|
||||
auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
|
||||
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
|
||||
if (!EntryPtr)
|
||||
continue;
|
||||
|
@ -1516,17 +1487,19 @@ private:
|
|||
}
|
||||
|
||||
void mapAndInitializeRingBuffer() {
|
||||
if (getFlags()->allocation_ring_buffer_size <= 0)
|
||||
return;
|
||||
u32 AllocationRingBufferSize =
|
||||
static_cast<u32>(getFlags()->allocation_ring_buffer_size);
|
||||
if (AllocationRingBufferSize < 1)
|
||||
return;
|
||||
RawRingBuffer = static_cast<char *>(
|
||||
map(/*Addr=*/nullptr,
|
||||
roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
|
||||
getPageSizeCached()),
|
||||
"AllocatorRingBuffer"));
|
||||
auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
|
||||
RingBuffer->Size = AllocationRingBufferSize;
|
||||
MemMapT MemMap;
|
||||
MemMap.map(
|
||||
/*Addr=*/0U,
|
||||
roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
|
||||
getPageSizeCached()),
|
||||
"scudo:ring_buffer");
|
||||
RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
|
||||
RawRingBufferMap = MemMap;
|
||||
RingBufferElements = AllocationRingBufferSize;
|
||||
static_assert(sizeof(AllocationRingBuffer) %
|
||||
alignof(typename AllocationRingBuffer::Entry) ==
|
||||
0,
|
||||
|
@ -1534,14 +1507,25 @@ private:
|
|||
}
|
||||
|
||||
void unmapRingBuffer() {
|
||||
unmap(RawRingBuffer, roundUp(getRingBufferSize(), getPageSizeCached()));
|
||||
auto *RingBuffer = getRingBuffer();
|
||||
if (RingBuffer != nullptr) {
|
||||
RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
|
||||
RawRingBufferMap.getCapacity());
|
||||
}
|
||||
RawRingBuffer = nullptr;
|
||||
}
|
||||
|
||||
static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
|
||||
static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
|
||||
return sizeof(AllocationRingBuffer) +
|
||||
AllocationRingBufferSize *
|
||||
sizeof(typename AllocationRingBuffer::Entry);
|
||||
RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
|
||||
}
|
||||
|
||||
static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
|
||||
if (Bytes < sizeof(AllocationRingBuffer)) {
|
||||
return 0;
|
||||
}
|
||||
return (Bytes - sizeof(AllocationRingBuffer)) /
|
||||
sizeof(typename AllocationRingBuffer::Entry);
|
||||
}
|
||||
|
||||
inline AllocationRingBuffer *getRingBuffer() {
|
||||
|
|
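A standalone sketch, with made-up sizes, of why the two new helpers above round-trip; keeping RingBufferElements outside the mapping lets the allocator derive the element count from a caller-supplied buffer length instead of trusting a Size field stored inside it.

#include <cstddef>

constexpr size_t HeaderBytes = 64;  // stands in for sizeof(AllocationRingBuffer)
constexpr size_t EntryBytes = 40;   // stands in for sizeof(Entry)

constexpr size_t bytesFor(size_t Elements) {   // ringBufferSizeInBytes()
  return HeaderBytes + Elements * EntryBytes;
}
constexpr size_t elementsFor(size_t Bytes) {   // ringBufferElementsFromBytes()
  return Bytes < HeaderBytes ? 0 : (Bytes - HeaderBytes) / EntryBytes;
}
static_assert(elementsFor(bytesFor(32768)) == 32768, "round trip");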
Telegram/ThirdParty/scudo/common.cpp (vendored): 18 changes
|
@ -21,22 +21,4 @@ uptr getPageSizeSlow() {
|
|||
return PageSizeCached;
|
||||
}
|
||||
|
||||
// Fatal internal map() or unmap() error (potentially OOM related).
|
||||
void NORETURN dieOnMapUnmapError(uptr SizeIfOOM) {
|
||||
char Error[128] = "Scudo ERROR: internal map or unmap failure\n";
|
||||
if (SizeIfOOM) {
|
||||
formatString(
|
||||
Error, sizeof(Error),
|
||||
"Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
|
||||
SizeIfOOM >> 10);
|
||||
}
|
||||
outputRaw(Error);
|
||||
setAbortMessage(Error);
|
||||
die();
|
||||
}
|
||||
|
||||
#if !SCUDO_LINUX
|
||||
uptr GetRSS() { return 0; }
|
||||
#endif
|
||||
|
||||
} // namespace scudo
|
||||
|
|
Telegram/ThirdParty/scudo/common.h (vendored): 38 changes
|
@ -17,6 +17,7 @@
|
|||
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace scudo {
|
||||
|
||||
|
@ -111,19 +112,19 @@ template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
|
|||
*RandState = State;
|
||||
}
|
||||
|
||||
// Hardware specific inlinable functions.
|
||||
inline void computePercentage(uptr Numerator, uptr Denominator, uptr *Integral,
|
||||
uptr *Fractional) {
|
||||
constexpr uptr Digits = 100;
|
||||
if (Denominator == 0) {
|
||||
*Integral = 100;
|
||||
*Fractional = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
inline void yieldProcessor(UNUSED u8 Count) {
|
||||
#if defined(__i386__) || defined(__x86_64__)
|
||||
__asm__ __volatile__("" ::: "memory");
|
||||
for (u8 I = 0; I < Count; I++)
|
||||
__asm__ __volatile__("pause");
|
||||
#elif defined(__aarch64__) || defined(__arm__)
|
||||
__asm__ __volatile__("" ::: "memory");
|
||||
for (u8 I = 0; I < Count; I++)
|
||||
__asm__ __volatile__("yield");
|
||||
#endif
|
||||
__asm__ __volatile__("" ::: "memory");
|
||||
*Integral = Numerator * Digits / Denominator;
|
||||
*Fractional =
|
||||
(((Numerator * Digits) % Denominator) * Digits + Denominator / 2) /
|
||||
Denominator;
|
||||
}
|
||||
|
||||
// Platform specific functions.
|
||||
|
@ -131,9 +132,10 @@ inline void yieldProcessor(UNUSED u8 Count) {
|
|||
extern uptr PageSizeCached;
|
||||
uptr getPageSizeSlow();
|
||||
inline uptr getPageSizeCached() {
|
||||
// Bionic uses a hardcoded value.
|
||||
if (SCUDO_ANDROID)
|
||||
return 4096U;
|
||||
#if SCUDO_ANDROID && defined(PAGE_SIZE)
|
||||
// Most Android builds have a build-time constant page size.
|
||||
return PAGE_SIZE;
|
||||
#endif
|
||||
if (LIKELY(PageSizeCached))
|
||||
return PageSizeCached;
|
||||
return getPageSizeSlow();
|
||||
|
@ -144,8 +146,6 @@ u32 getNumberOfCPUs();
|
|||
|
||||
const char *getEnv(const char *Name);
|
||||
|
||||
uptr GetRSS();
|
||||
|
||||
u64 getMonotonicTime();
|
||||
// Gets the time faster but with less accuracy. Can call getMonotonicTime
|
||||
// if no fast version is available.
|
||||
|
@ -190,10 +190,6 @@ void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
|
|||
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
|
||||
MapPlatformData *Data = nullptr);
|
||||
|
||||
// Internal map & unmap fatal error. This must not call map(). SizeIfOOM shall
|
||||
// hold the requested size on an out-of-memory error, 0 otherwise.
|
||||
void NORETURN dieOnMapUnmapError(uptr SizeIfOOM = 0);
|
||||
|
||||
// Logging related functions.
|
||||
|
||||
void setAbortMessage(const char *Message);
|
||||
|
|
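A small usage sketch for the computePercentage() helper added above; the numbers are illustrative.

void percentageSketch() {
  scudo::uptr Integral, Fractional;
  scudo::computePercentage(/*Numerator=*/379, /*Denominator=*/1024, &Integral,
                           &Fractional);
  // Integral == 37, Fractional == 1: 379 is 37.01% of 1024. A Denominator of
  // 0 deliberately reports 100 with a zero fractional part.
}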
Telegram/ThirdParty/scudo/condition_variable.h (vendored, new file): 60 additions
|
@ -0,0 +1,60 @@
|
|||
//===-- condition_variable.h ------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_CONDITION_VARIABLE_H_
|
||||
#define SCUDO_CONDITION_VARIABLE_H_
|
||||
|
||||
#include "condition_variable_base.h"
|
||||
|
||||
#include "common.h"
|
||||
#include "platform.h"
|
||||
|
||||
#include "condition_variable_linux.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
// A default implementation of default condition variable. It doesn't do a real
|
||||
// `wait`, instead it spins a short amount of time only.
|
||||
class ConditionVariableDummy
|
||||
: public ConditionVariableBase<ConditionVariableDummy> {
|
||||
public:
|
||||
void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}
|
||||
|
||||
void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
|
||||
M.unlock();
|
||||
|
||||
constexpr u32 SpinTimes = 64;
|
||||
volatile u32 V = 0;
|
||||
for (u32 I = 0; I < SpinTimes; ++I) {
|
||||
u32 Tmp = V + 1;
|
||||
V = Tmp;
|
||||
}
|
||||
|
||||
M.lock();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Config, typename = const bool>
|
||||
struct ConditionVariableState {
|
||||
static constexpr bool enabled() { return false; }
|
||||
// This is only used for compilation purpose so that we won't end up having
|
||||
// many conditional compilations. If you want to use `ConditionVariableDummy`,
|
||||
// define `ConditionVariableT` in your allocator configuration. See
|
||||
// allocator_config.h for more details.
|
||||
using ConditionVariableT = ConditionVariableDummy;
|
||||
};
|
||||
|
||||
template <typename Config>
|
||||
struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
|
||||
static constexpr bool enabled() { return Config::UseConditionVariable; }
|
||||
using ConditionVariableT = typename Config::ConditionVariableT;
|
||||
};
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_CONDITION_VARIABLE_H_
|
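A sketch of how ConditionVariableState picks an implementation on a Linux build; PlainConfig and CvConfig are hypothetical configs, the SFINAE detection itself is exactly what the header above defines.

struct PlainConfig {};  // no UseConditionVariable member: dummy fallback
struct CvConfig {
  static const bool UseConditionVariable = true;
  using ConditionVariableT = scudo::ConditionVariableLinux;
};
static_assert(!scudo::ConditionVariableState<PlainConfig>::enabled(),
              "falls back to ConditionVariableDummy");
static_assert(scudo::ConditionVariableState<CvConfig>::enabled(),
              "uses Config::ConditionVariableT");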
Telegram/ThirdParty/scudo/condition_variable_base.h (vendored, new file): 56 additions
|
@ -0,0 +1,56 @@
|
|||
//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
|
||||
#define SCUDO_CONDITION_VARIABLE_BASE_H_
|
||||
|
||||
#include "mutex.h"
|
||||
#include "thread_annotations.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
template <typename Derived> class ConditionVariableBase {
|
||||
public:
|
||||
constexpr ConditionVariableBase() = default;
|
||||
|
||||
void bindTestOnly(HybridMutex &Mutex) {
|
||||
#if SCUDO_DEBUG
|
||||
boundMutex = &Mutex;
|
||||
#else
|
||||
(void)Mutex;
|
||||
#endif
|
||||
}
|
||||
|
||||
void notifyAll(HybridMutex &M) REQUIRES(M) {
|
||||
#if SCUDO_DEBUG
|
||||
CHECK_EQ(&M, boundMutex);
|
||||
#endif
|
||||
getDerived()->notifyAllImpl(M);
|
||||
}
|
||||
|
||||
void wait(HybridMutex &M) REQUIRES(M) {
|
||||
#if SCUDO_DEBUG
|
||||
CHECK_EQ(&M, boundMutex);
|
||||
#endif
|
||||
getDerived()->waitImpl(M);
|
||||
}
|
||||
|
||||
protected:
|
||||
Derived *getDerived() { return static_cast<Derived *>(this); }
|
||||
|
||||
#if SCUDO_DEBUG
|
||||
// Because thread-safety analysis doesn't support pointer aliasing, we are not
|
||||
// able to mark the proper annotations without false positive. Instead, we
|
||||
// pass the lock and do the same-lock check separately.
|
||||
HybridMutex *boundMutex = nullptr;
|
||||
#endif
|
||||
};
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
|
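A usage sketch for the CRTP base, shown with the dummy implementation from condition_variable.h: the caller owns a HybridMutex, optionally binds it for the debug same-lock check, and must hold it around wait()/notifyAll(), matching the REQUIRES annotations.

scudo::HybridMutex Mutex;
scudo::ConditionVariableDummy CV;

void waiterSketch() {
  CV.bindTestOnly(Mutex);  // debug-only: records which mutex guards this CV
  Mutex.lock();
  CV.wait(Mutex);          // waitImpl() drops and re-acquires Mutex internally
  Mutex.unlock();
}

void notifierSketch() {
  Mutex.lock();
  CV.notifyAll(Mutex);     // wakes all waiters registered on this CV
  Mutex.unlock();
}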
Telegram/ThirdParty/scudo/condition_variable_linux.cpp (vendored, new file): 52 additions
|
@ -0,0 +1,52 @@
|
|||
//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#if SCUDO_LINUX
|
||||
|
||||
#include "condition_variable_linux.h"
|
||||
|
||||
#include "atomic_helpers.h"
|
||||
|
||||
#include <limits.h>
|
||||
#include <linux/futex.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace scudo {
|
||||
|
||||
void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
|
||||
const u32 V = atomic_load_relaxed(&Counter);
|
||||
atomic_store_relaxed(&Counter, V + 1);
|
||||
|
||||
// TODO(chiahungduan): Move the waiters from the futex waiting queue
|
||||
// `Counter` to futex waiting queue `M` so that the awoken threads won't be
|
||||
// blocked again due to locked `M` by current thread.
|
||||
if (LastNotifyAll != V) {
|
||||
syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
|
||||
INT_MAX, nullptr, nullptr, 0);
|
||||
}
|
||||
|
||||
LastNotifyAll = V + 1;
|
||||
}
|
||||
|
||||
void ConditionVariableLinux::waitImpl(HybridMutex &M) {
|
||||
const u32 V = atomic_load_relaxed(&Counter) + 1;
|
||||
atomic_store_relaxed(&Counter, V);
|
||||
|
||||
// TODO: Use ScopedUnlock when it's supported.
|
||||
M.unlock();
|
||||
syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
|
||||
nullptr, nullptr, 0);
|
||||
M.lock();
|
||||
}
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_LINUX
|
Telegram/ThirdParty/scudo/condition_variable_linux.h (vendored, new file): 38 additions
|
@ -0,0 +1,38 @@
|
|||
//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
|
||||
#define SCUDO_CONDITION_VARIABLE_LINUX_H_
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#if SCUDO_LINUX
|
||||
|
||||
#include "atomic_helpers.h"
|
||||
#include "condition_variable_base.h"
|
||||
#include "thread_annotations.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
class ConditionVariableLinux
|
||||
: public ConditionVariableBase<ConditionVariableLinux> {
|
||||
public:
|
||||
void notifyAllImpl(HybridMutex &M) REQUIRES(M);
|
||||
|
||||
void waitImpl(HybridMutex &M) REQUIRES(M);
|
||||
|
||||
private:
|
||||
u32 LastNotifyAll = 0;
|
||||
atomic_u32 Counter = {};
|
||||
};
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_LINUX
|
||||
|
||||
#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
|
Telegram/ThirdParty/scudo/flags.cpp (vendored): 3 changes
|
@ -68,6 +68,9 @@ void initFlags() {
|
|||
Parser.parseString(getCompileDefinitionScudoDefaultOptions());
|
||||
Parser.parseString(getScudoDefaultOptions());
|
||||
Parser.parseString(getEnv("SCUDO_OPTIONS"));
|
||||
if (const char *V = getEnv("SCUDO_ALLOCATION_RING_BUFFER_SIZE")) {
|
||||
Parser.parseStringPair("allocation_ring_buffer_size", V);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace scudo
|
||||
|
|
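Presumably this is aimed at tooling that only wants to resize the ring buffer without composing a full SCUDO_OPTIONS string; since it is parsed after SCUDO_OPTIONS, the dedicated variable wins. A hypothetical launcher-side sketch:

#include <cstdlib>
#include <unistd.h>

int launchWithRingBuffer(char *const Argv[]) {
  // Same effect as SCUDO_OPTIONS=allocation_ring_buffer_size=1024, but the
  // rest of SCUDO_OPTIONS stays untouched.
  setenv("SCUDO_ALLOCATION_RING_BUFFER_SIZE", "1024", /*overwrite=*/1);
  return execv(Argv[0], Argv);  // the scudo-linked child reads it at init
}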
Telegram/ThirdParty/scudo/flags.inc (vendored): 12 changes
|
@ -46,14 +46,6 @@ SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
|
|||
"Interval (in milliseconds) at which to attempt release of unused "
|
||||
"memory to the OS. Negative values disable the feature.")
|
||||
|
||||
SCUDO_FLAG(int, hard_rss_limit_mb, 0,
|
||||
"Hard RSS Limit in Mb. If non-zero, once the limit is achieved, "
|
||||
"abort the process")
|
||||
|
||||
SCUDO_FLAG(int, soft_rss_limit_mb, 0,
|
||||
"Soft RSS Limit in Mb. If non-zero, once the limit is reached, all "
|
||||
"subsequent calls will fail or return NULL until the RSS goes below "
|
||||
"the soft limit")
|
||||
|
||||
SCUDO_FLAG(int, allocation_ring_buffer_size, 32768,
|
||||
"Entries to keep in the allocation ring buffer for scudo.")
|
||||
"Entries to keep in the allocation ring buffer for scudo. "
|
||||
"Values less or equal to zero disable the buffer.")
|
||||
|
|
Telegram/ThirdParty/scudo/flags_parser.cpp (vendored): 30 changes
|
@ -10,6 +10,8 @@
|
|||
#include "common.h"
|
||||
#include "report.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <limits.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
|
@ -80,7 +82,7 @@ void FlagParser::parseFlag() {
|
|||
++Pos;
|
||||
Value = Buffer + ValueStart;
|
||||
}
|
||||
if (!runHandler(Name, Value))
|
||||
if (!runHandler(Name, Value, '='))
|
||||
reportError("flag parsing failed.");
|
||||
}
|
||||
|
||||
|
@ -122,10 +124,16 @@ inline bool parseBool(const char *Value, bool *b) {
|
|||
return false;
|
||||
}
|
||||
|
||||
bool FlagParser::runHandler(const char *Name, const char *Value) {
|
||||
void FlagParser::parseStringPair(const char *Name, const char *Value) {
|
||||
if (!runHandler(Name, Value, '\0'))
|
||||
reportError("flag parsing failed.");
|
||||
}
|
||||
|
||||
bool FlagParser::runHandler(const char *Name, const char *Value,
|
||||
const char Sep) {
|
||||
for (u32 I = 0; I < NumberOfFlags; ++I) {
|
||||
const uptr Len = strlen(Flags[I].Name);
|
||||
if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
|
||||
if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != Sep)
|
||||
continue;
|
||||
bool Ok = false;
|
||||
switch (Flags[I].Type) {
|
||||
|
@ -136,12 +144,18 @@ bool FlagParser::runHandler(const char *Name, const char *Value) {
|
|||
break;
|
||||
case FlagType::FT_int:
|
||||
char *ValueEnd;
|
||||
*reinterpret_cast<int *>(Flags[I].Var) =
|
||||
static_cast<int>(strtol(Value, &ValueEnd, 10));
|
||||
Ok =
|
||||
*ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
|
||||
if (!Ok)
|
||||
errno = 0;
|
||||
long V = strtol(Value, &ValueEnd, 10);
|
||||
if (errno != 0 || // strtol failed (over or underflow)
|
||||
V > INT_MAX || V < INT_MIN || // overflows integer
|
||||
// contains unexpected characters
|
||||
(*ValueEnd != '"' && *ValueEnd != '\'' &&
|
||||
!isSeparatorOrNull(*ValueEnd))) {
|
||||
reportInvalidFlag("int", Value);
|
||||
break;
|
||||
}
|
||||
*reinterpret_cast<int *>(Flags[I].Var) = static_cast<int>(V);
|
||||
Ok = true;
|
||||
break;
|
||||
}
|
||||
return Ok;
|
||||
|
|
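A standalone restatement of the hardened integer parsing introduced above; the separator check is a stand-in for the internal isSeparatorOrNull(), and the real code reports through reportInvalidFlag() instead of returning false.

#include <cerrno>
#include <climits>
#include <cstdlib>

bool parseIntFlagSketch(const char *Value, int *Out) {
  char *End = nullptr;
  errno = 0;
  const long V = strtol(Value, &End, 10);
  if (errno != 0 || V > INT_MAX || V < INT_MIN)
    return false;  // strtol over/underflow, or value does not fit in int
  if (*End != '\0' && *End != '"' && *End != '\'' && *End != ',' &&
      *End != ' ')
    return false;  // unexpected trailing characters
  *Out = static_cast<int>(V);
  return true;
}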
Telegram/ThirdParty/scudo/flags_parser.h (vendored): 3 changes
|
@ -27,6 +27,7 @@ public:
|
|||
void *Var);
|
||||
void parseString(const char *S);
|
||||
void printFlagDescriptions();
|
||||
void parseStringPair(const char *Name, const char *Value);
|
||||
|
||||
private:
|
||||
static const u32 MaxFlags = 20;
|
||||
|
@ -45,7 +46,7 @@ private:
|
|||
void skipWhitespace();
|
||||
void parseFlags();
|
||||
void parseFlag();
|
||||
bool runHandler(const char *Name, const char *Value);
|
||||
bool runHandler(const char *Name, const char *Value, char Sep);
|
||||
};
|
||||
|
||||
void reportUnrecognizedFlags();
|
||||
|
|
|
@ -46,14 +46,11 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
|
|||
}
|
||||
|
||||
std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
|
||||
// RingBuffer is too short.
|
||||
if (!AllocatorT::setRingBufferSizeForBuffer(RingBufferBytes.data(),
|
||||
RingBufferBytes.size()))
|
||||
return 0;
|
||||
|
||||
scudo_error_info ErrorInfo;
|
||||
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
|
||||
RegionInfo.data(), RingBufferBytes.data(), Memory,
|
||||
MemoryTags, MemoryAddr, MemorySize);
|
||||
RegionInfo.data(), RingBufferBytes.data(),
|
||||
RingBufferBytes.size(), Memory, MemoryTags,
|
||||
MemoryAddr, MemorySize);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -17,10 +17,22 @@ extern "C" {
|
|||
__attribute__((weak)) const char *__scudo_default_options(void);
|
||||
|
||||
// Post-allocation & pre-deallocation hooks.
|
||||
// They must be thread-safe and not use heap related functions.
|
||||
__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size);
|
||||
__attribute__((weak)) void __scudo_deallocate_hook(void *ptr);
|
||||
|
||||
// `realloc` involves both deallocation and allocation but they are not reported
|
||||
// atomically. In one specific case which may keep taking a snapshot right in
|
||||
// the middle of `realloc` reporting the deallocation and allocation, it may
|
||||
// confuse the user by missing memory from `realloc`. To alleviate that case,
|
||||
// define the two `realloc` hooks to get the knowledge of the bundled hook
|
||||
// calls. These hooks are optional and should only be used when a hooks user
|
||||
// wants to track reallocs more closely.
|
||||
//
|
||||
// See more details in the comment of `realloc` in wrapper_c.inc.
|
||||
__attribute__((weak)) void
|
||||
__scudo_realloc_allocate_hook(void *old_ptr, void *new_ptr, size_t size);
|
||||
__attribute__((weak)) void __scudo_realloc_deallocate_hook(void *old_ptr);
|
||||
|
||||
void __scudo_print_stats(void);
|
||||
|
||||
typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
|
||||
|
@ -73,7 +85,8 @@ typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
|
|||
// pointer.
|
||||
void __scudo_get_error_info(struct scudo_error_info *error_info,
|
||||
uintptr_t fault_addr, const char *stack_depot,
|
||||
const char *region_info, const char *ring_buffer,
|
||||
size_t stack_depot_size, const char *region_info,
|
||||
const char *ring_buffer, size_t ring_buffer_size,
|
||||
const char *memory, const char *memory_tags,
|
||||
uintptr_t memory_addr, size_t memory_size);
|
||||
|
||||
|
|
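A hypothetical embedder-side definition of the optional hooks declared above, not part of the commit; the realloc pair lets a tracker treat the deallocation and allocation performed by realloc() as one bundled event, per the comment above.

#include <stddef.h>

extern "C" {
void __scudo_allocate_hook(void *Ptr, size_t Size) {
  // record (Ptr, Size); must be thread-safe and must not call into the heap
}
void __scudo_deallocate_hook(void *Ptr) {
  // record the free of Ptr
}
void __scudo_realloc_deallocate_hook(void *OldPtr) {
  // start of a realloc: the upcoming deallocate/allocate pair belongs together
}
void __scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr, size_t Size) {
  // end of the realloc that retired OldPtr and produced (NewPtr, Size)
}
}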
Telegram/ThirdParty/scudo/linux.cpp (vendored): 50 changes
|
@ -14,6 +14,7 @@
|
|||
#include "internal_defs.h"
|
||||
#include "linux.h"
|
||||
#include "mutex.h"
|
||||
#include "report_linux.h"
|
||||
#include "string_utils.h"
|
||||
|
||||
#include <errno.h>
|
||||
|
@ -43,6 +44,7 @@ uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
|
|||
|
||||
void NORETURN die() { abort(); }
|
||||
|
||||
// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
|
||||
void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
|
||||
UNUSED MapPlatformData *Data) {
|
||||
int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
|
||||
|
@ -65,7 +67,7 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
|
|||
void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
|
||||
if (P == MAP_FAILED) {
|
||||
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
|
||||
dieOnMapUnmapError(errno == ENOMEM ? Size : 0);
|
||||
reportMapError(errno == ENOMEM ? Size : 0);
|
||||
return nullptr;
|
||||
}
|
||||
#if SCUDO_ANDROID
|
||||
|
@ -75,19 +77,22 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
|
|||
return P;
|
||||
}
|
||||
|
||||
// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
|
||||
void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
|
||||
UNUSED MapPlatformData *Data) {
|
||||
if (munmap(Addr, Size) != 0)
|
||||
dieOnMapUnmapError();
|
||||
reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
|
||||
}
|
||||
|
||||
// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
|
||||
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
|
||||
UNUSED MapPlatformData *Data) {
|
||||
int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
|
||||
if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
|
||||
dieOnMapUnmapError();
|
||||
reportProtectError(Addr, Size, Prot);
|
||||
}
|
||||
|
||||
// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
|
||||
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
|
||||
UNUSED MapPlatformData *Data) {
|
||||
void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
|
||||
|
@ -104,12 +109,14 @@ enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
|
|||
}
|
||||
|
||||
bool HybridMutex::tryLock() {
|
||||
return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
|
||||
return atomic_compare_exchange_strong(&M, Unlocked, Locked,
|
||||
memory_order_acquire) == Unlocked;
|
||||
}
|
||||
|
||||
// The following is based on https://akkadia.org/drepper/futex.pdf.
|
||||
void HybridMutex::lockSlow() {
|
||||
u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
|
||||
u32 V = atomic_compare_exchange_strong(&M, Unlocked, Locked,
|
||||
memory_order_acquire);
|
||||
if (V == Unlocked)
|
||||
return;
|
||||
if (V != Sleeping)
|
||||
|
@ -197,39 +204,6 @@ bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
|
|||
extern "C" WEAK int async_safe_write_log(int pri, const char *tag,
|
||||
const char *msg);
|
||||
|
||||
static uptr GetRSSFromBuffer(const char *Buf) {
|
||||
// The format of the file is:
|
||||
// 1084 89 69 11 0 79 0
|
||||
// We need the second number which is RSS in pages.
|
||||
const char *Pos = Buf;
|
||||
// Skip the first number.
|
||||
while (*Pos >= '0' && *Pos <= '9')
|
||||
Pos++;
|
||||
// Skip whitespaces.
|
||||
while (!(*Pos >= '0' && *Pos <= '9') && *Pos != 0)
|
||||
Pos++;
|
||||
// Read the number.
|
||||
u64 Rss = 0;
|
||||
for (; *Pos >= '0' && *Pos <= '9'; Pos++)
|
||||
Rss = Rss * 10 + static_cast<u64>(*Pos) - '0';
|
||||
return static_cast<uptr>(Rss * getPageSizeCached());
|
||||
}
|
||||
|
||||
uptr GetRSS() {
|
||||
// TODO: We currently use sanitizer_common's GetRSS which reads the
|
||||
// RSS from /proc/self/statm by default. We might want to
|
||||
// call getrusage directly, even if it's less accurate.
|
||||
auto Fd = open("/proc/self/statm", O_RDONLY);
|
||||
char Buf[64];
|
||||
s64 Len = read(Fd, Buf, sizeof(Buf) - 1);
|
||||
close(Fd);
|
||||
if (Len <= 0)
|
||||
return 0;
|
||||
Buf[Len] = 0;
|
||||
|
||||
return GetRSSFromBuffer(Buf);
|
||||
}
|
||||
|
||||
void outputRaw(const char *Buffer) {
|
||||
if (&async_safe_write_log) {
|
||||
constexpr s32 AndroidLogInfo = 4;
|
||||
|
|
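A side note on the GetRSSFromBuffer()/GetRSS() helpers removed above (their only caller, the RSS limit checker, is gone in this update): the measurement is easy to reproduce outside scudo. The sketch below is illustrative only; it reads /proc/self/statm and takes the second field, which is the resident set size in pages.

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static size_t readRssBytes() {
  char Buf[64] = {};
  const int Fd = open("/proc/self/statm", O_RDONLY);
  if (Fd < 0)
    return 0;
  const ssize_t Len = read(Fd, Buf, sizeof(Buf) - 1);
  close(Fd);
  if (Len <= 0)
    return 0;
  char *End = nullptr;
  (void)strtoul(Buf, &End, 10);                          // field 1: total size
  const unsigned long RssPages = strtoul(End, &End, 10); // field 2: RSS pages
  return static_cast<size_t>(RssPages) *
         static_cast<size_t>(sysconf(_SC_PAGESIZE));
}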
125
Telegram/ThirdParty/scudo/local_cache.h
vendored
|
@ -22,80 +22,13 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
|
|||
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
|
||||
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
|
||||
|
||||
struct TransferBatch {
|
||||
static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
|
||||
void setFromArray(CompactPtrT *Array, u16 N) {
|
||||
DCHECK_LE(N, MaxNumCached);
|
||||
Count = N;
|
||||
memcpy(Batch, Array, sizeof(Batch[0]) * Count);
|
||||
}
|
||||
void appendFromArray(CompactPtrT *Array, u16 N) {
|
||||
DCHECK_LE(N, MaxNumCached - Count);
|
||||
memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
|
||||
// u16 will be promoted to int by arithmetic type conversion.
|
||||
Count = static_cast<u16>(Count + N);
|
||||
}
|
||||
void appendFromTransferBatch(TransferBatch *B, u16 N) {
|
||||
DCHECK_LE(N, MaxNumCached - Count);
|
||||
DCHECK_GE(B->Count, N);
|
||||
// Append from the back of `B`.
|
||||
memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
|
||||
// u16 will be promoted to int by arithmetic type conversion.
|
||||
Count = static_cast<u16>(Count + N);
|
||||
B->Count = static_cast<u16>(B->Count - N);
|
||||
}
|
||||
void clear() { Count = 0; }
|
||||
void add(CompactPtrT P) {
|
||||
DCHECK_LT(Count, MaxNumCached);
|
||||
Batch[Count++] = P;
|
||||
}
|
||||
void copyToArray(CompactPtrT *Array) const {
|
||||
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
|
||||
}
|
||||
u16 getCount() const { return Count; }
|
||||
bool isEmpty() const { return Count == 0U; }
|
||||
CompactPtrT get(u16 I) const {
|
||||
DCHECK_LE(I, Count);
|
||||
return Batch[I];
|
||||
}
|
||||
static u16 getMaxCached(uptr Size) {
|
||||
return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
|
||||
}
|
||||
TransferBatch *Next;
|
||||
|
||||
private:
|
||||
CompactPtrT Batch[MaxNumCached];
|
||||
u16 Count;
|
||||
};
|
||||
|
||||
// A BatchGroup is used to collect blocks. Each group has a group id to
|
||||
// identify the group kind of contained blocks.
|
||||
struct BatchGroup {
|
||||
// `Next` is used by IntrusiveList.
|
||||
BatchGroup *Next;
|
||||
// The compact base address of each group
|
||||
uptr CompactPtrGroupBase;
|
||||
// Cache value of TransferBatch::getMaxCached()
|
||||
u16 MaxCachedPerBatch;
|
||||
// Number of blocks pushed into this group. This is an increment-only
|
||||
// counter.
|
||||
uptr PushedBlocks;
|
||||
// This is used to track how many bytes are not in-use since last time we
|
||||
// tried to release pages.
|
||||
uptr BytesInBGAtLastCheckpoint;
|
||||
// Blocks are managed by TransferBatch in a list.
|
||||
SinglyLinkedList<TransferBatch> Batches;
|
||||
};
|
||||
|
||||
static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
|
||||
"BatchGroup uses the same class size as TransferBatch");
|
||||
|
||||
void init(GlobalStats *S, SizeClassAllocator *A) {
|
||||
DCHECK(isEmpty());
|
||||
Stats.init();
|
||||
if (LIKELY(S))
|
||||
S->link(&Stats);
|
||||
Allocator = A;
|
||||
initCache();
|
||||
}
|
||||
|
||||
void destroy(GlobalStats *S) {
|
||||
|
@ -108,7 +41,9 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
|
|||
DCHECK_LT(ClassId, NumClasses);
|
||||
PerClass *C = &PerClassArray[ClassId];
|
||||
if (C->Count == 0) {
|
||||
if (UNLIKELY(!refill(C, ClassId)))
|
||||
// Refill half of the maximum number of cached blocks.
|
||||
DCHECK_GT(C->MaxCount / 2, 0U);
|
||||
if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
|
||||
return nullptr;
|
||||
DCHECK_GT(C->Count, 0);
|
||||
}
|
||||
|
@ -125,9 +60,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
|
|||
bool deallocate(uptr ClassId, void *P) {
|
||||
CHECK_LT(ClassId, NumClasses);
|
||||
PerClass *C = &PerClassArray[ClassId];
|
||||
// We still have to initialize the cache in the event that the first heap
|
||||
// operation in a thread is a deallocation.
|
||||
initCacheMaybe(C);
|
||||
|
||||
// If the cache is full, drain half of blocks back to the main allocator.
|
||||
const bool NeedToDrainCache = C->Count == C->MaxCount;
|
||||
|
@ -151,7 +83,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
|
|||
}
|
||||
|
||||
void drain() {
|
||||
// Drain BatchClassId last as createBatch can refill it.
|
||||
// Drain BatchClassId last as it may be needed while draining normal blocks.
|
||||
for (uptr I = 0; I < NumClasses; ++I) {
|
||||
if (I == BatchClassId)
|
||||
continue;
|
||||
|
@ -163,19 +95,11 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
|
|||
DCHECK(isEmpty());
|
||||
}
|
||||
|
||||
TransferBatch *createBatch(uptr ClassId, void *B) {
|
||||
if (ClassId != BatchClassId)
|
||||
B = allocate(BatchClassId);
|
||||
void *getBatchClassBlock() {
|
||||
void *B = allocate(BatchClassId);
|
||||
if (UNLIKELY(!B))
|
||||
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
|
||||
return reinterpret_cast<TransferBatch *>(B);
|
||||
}
|
||||
|
||||
BatchGroup *createGroup() {
|
||||
void *Ptr = allocate(BatchClassId);
|
||||
if (UNLIKELY(!Ptr))
|
||||
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
|
||||
return reinterpret_cast<BatchGroup *>(Ptr);
|
||||
return B;
|
||||
}
|
||||
|
||||
LocalStats &getStats() { return Stats; }
|
||||
|
@ -203,6 +127,11 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
|
|||
Str->append(" No block is cached.\n");
|
||||
}
|
||||
|
||||
static u16 getMaxCached(uptr Size) {
|
||||
return Min(SizeClassMap::MaxNumCachedHint,
|
||||
SizeClassMap::getMaxCachedHint(Size));
|
||||
}
|
||||
|
||||
private:
|
||||
static const uptr NumClasses = SizeClassMap::NumClasses;
|
||||
static const uptr BatchClassId = SizeClassMap::BatchClassId;
|
||||
|
@ -211,24 +140,17 @@ private:
|
|||
u16 MaxCount;
|
||||
// Note: ClassSize is zero for the transfer batch.
|
||||
uptr ClassSize;
|
||||
CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
|
||||
CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
|
||||
};
|
||||
PerClass PerClassArray[NumClasses] = {};
|
||||
LocalStats Stats;
|
||||
SizeClassAllocator *Allocator = nullptr;
|
||||
|
||||
ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
|
||||
if (LIKELY(C->MaxCount))
|
||||
return;
|
||||
initCache();
|
||||
DCHECK_NE(C->MaxCount, 0U);
|
||||
}
|
||||
|
||||
NOINLINE void initCache() {
|
||||
for (uptr I = 0; I < NumClasses; I++) {
|
||||
PerClass *P = &PerClassArray[I];
|
||||
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
|
||||
P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
|
||||
P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
|
||||
if (I != BatchClassId) {
|
||||
P->ClassSize = Size;
|
||||
} else {
|
||||
|
@ -244,17 +166,12 @@ private:
|
|||
deallocate(BatchClassId, B);
|
||||
}
|
||||
|
||||
NOINLINE bool refill(PerClass *C, uptr ClassId) {
|
||||
initCacheMaybe(C);
|
||||
TransferBatch *B = Allocator->popBatch(this, ClassId);
|
||||
if (UNLIKELY(!B))
|
||||
return false;
|
||||
DCHECK_GT(B->getCount(), 0);
|
||||
C->Count = B->getCount();
|
||||
B->copyToArray(C->Chunks);
|
||||
B->clear();
|
||||
destroyBatch(ClassId, B);
|
||||
return true;
|
||||
NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
|
||||
const u16 NumBlocksRefilled =
|
||||
Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
|
||||
DCHECK_LE(NumBlocksRefilled, MaxRefill);
|
||||
C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
|
||||
return NumBlocksRefilled != 0;
|
||||
}
|
||||
|
||||
NOINLINE void drain(PerClass *C, uptr ClassId) {
|
||||
|
|
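The refill() rewrite above is one half of a hysteresis policy: an empty per-thread cache now asks the primary for only half of MaxCount via popBlocks(), and, as the deallocate() hunk shows, a full cache drains half back. A toy model of that behaviour, with made-up types standing in for the real allocator and cache, looks like this:

#include <cstdint>
#include <vector>

// Stand-in for the size-class allocator: hands out dummy block ids.
struct ToyPrimary {
  uint32_t Next = 0;
  uint16_t popBlocks(std::vector<uint32_t> &Out, uint16_t MaxRefill) {
    for (uint16_t I = 0; I < MaxRefill; ++I)
      Out.push_back(Next++);
    return MaxRefill;
  }
  void pushBlocks(std::vector<uint32_t> &Blocks) { Blocks.clear(); }
};

struct ToyCache {
  static constexpr uint16_t MaxCount = 64;
  std::vector<uint32_t> Chunks;
  ToyPrimary *Primary = nullptr;

  uint32_t allocate() {
    if (Chunks.empty())
      Primary->popBlocks(Chunks, MaxCount / 2); // refill half, not all
    const uint32_t Block = Chunks.back();
    Chunks.pop_back();
    return Block;
  }

  void deallocate(uint32_t Block) {
    if (Chunks.size() == MaxCount) {
      // Full: hand the older half back to the primary, keep the hotter half.
      std::vector<uint32_t> Drained(Chunks.begin(),
                                    Chunks.begin() + MaxCount / 2);
      Chunks.erase(Chunks.begin(), Chunks.begin() + MaxCount / 2);
      Primary->pushBlocks(Drained);
    }
    Chunks.push_back(Block);
  }
};

Keeping both thresholds at half of MaxCount means a thread oscillating around a boundary no longer bounces blocks to and from the primary on every call.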
5
Telegram/ThirdParty/scudo/mem_map.h
vendored
|
@ -22,6 +22,7 @@
|
|||
#include "trusty.h"
|
||||
|
||||
#include "mem_map_fuchsia.h"
|
||||
#include "mem_map_linux.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
|
@ -73,10 +74,10 @@ private:
|
|||
};
|
||||
|
||||
#if SCUDO_LINUX
|
||||
using ReservedMemoryT = ReservedMemoryDefault;
|
||||
using ReservedMemoryT = ReservedMemoryLinux;
|
||||
using MemMapT = ReservedMemoryT::MemMapT;
|
||||
#elif SCUDO_FUCHSIA
|
||||
using ReservedMemoryT = ReservedMemoryDefault;
|
||||
using ReservedMemoryT = ReservedMemoryFuchsia;
|
||||
using MemMapT = ReservedMemoryT::MemMapT;
|
||||
#elif SCUDO_TRUSTY
|
||||
using ReservedMemoryT = ReservedMemoryDefault;
|
||||
|
|
12
Telegram/ThirdParty/scudo/mem_map_fuchsia.cpp
vendored
|
@ -41,7 +41,7 @@ static void setVmoName(zx_handle_t Vmo, const char *Name) {
|
|||
static uptr getRootVmarBase() {
|
||||
static atomic_uptr CachedResult = {0};
|
||||
|
||||
uptr Result = atomic_load_relaxed(&CachedResult);
|
||||
uptr Result = atomic_load(&CachedResult, memory_order_acquire);
|
||||
if (UNLIKELY(!Result)) {
|
||||
zx_info_vmar_t VmarInfo;
|
||||
zx_status_t Status =
|
||||
|
@ -50,7 +50,7 @@ static uptr getRootVmarBase() {
|
|||
CHECK_EQ(Status, ZX_OK);
|
||||
CHECK_NE(VmarInfo.base, 0);
|
||||
|
||||
atomic_store_relaxed(&CachedResult, VmarInfo.base);
|
||||
atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
|
||||
Result = VmarInfo.base;
|
||||
}
|
||||
|
||||
|
@ -61,7 +61,7 @@ static uptr getRootVmarBase() {
|
|||
static zx_handle_t getPlaceholderVmo() {
|
||||
static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
|
||||
|
||||
zx_handle_t Vmo = atomic_load_relaxed(&StoredVmo);
|
||||
zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
|
||||
if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
|
||||
// Create a zero-sized placeholder VMO.
|
||||
zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
|
||||
|
@ -72,9 +72,9 @@ static zx_handle_t getPlaceholderVmo() {
|
|||
|
||||
// Atomically store its handle. If some other thread wins the race, use its
|
||||
// handle and discard ours.
|
||||
zx_handle_t OldValue =
|
||||
atomic_compare_exchange(&StoredVmo, ZX_HANDLE_INVALID, Vmo);
|
||||
if (OldValue != ZX_HANDLE_INVALID) {
|
||||
zx_handle_t OldValue = atomic_compare_exchange_strong(
|
||||
&StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
|
||||
if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
|
||||
Status = _zx_handle_close(Vmo);
|
||||
CHECK_EQ(Status, ZX_OK);
|
||||
|
||||
|
|
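The two hunks above upgrade relaxed atomics to acquire/release for values that are computed once and then cached. Outside of scudo's atomic helpers, the same publish-once pattern looks like the sketch below; computeBaseOnce() is a hypothetical stand-in for the VMAR query.

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the expensive one-time query.
static uint64_t computeBaseOnce() { return 0x100000; }

static uint64_t getCachedBase() {
  static std::atomic<uint64_t> Cached{0};
  uint64_t Result = Cached.load(std::memory_order_acquire);
  if (Result == 0) {
    Result = computeBaseOnce();
    // Racing threads all publish the same value, so last-writer-wins is
    // harmless; the release store pairs with the acquire load above.
    Cached.store(Result, std::memory_order_release);
  }
  return Result;
}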
153
Telegram/ThirdParty/scudo/mem_map_linux.cpp
vendored
Normal file
|
@ -0,0 +1,153 @@
|
|||
//===-- mem_map_linux.cpp ---------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#if SCUDO_LINUX
|
||||
|
||||
#include "mem_map_linux.h"
|
||||
|
||||
#include "common.h"
|
||||
#include "internal_defs.h"
|
||||
#include "linux.h"
|
||||
#include "mutex.h"
|
||||
#include "report_linux.h"
|
||||
#include "string_utils.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <linux/futex.h>
|
||||
#include <sched.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <sys/time.h>
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#if SCUDO_ANDROID
|
||||
// TODO(chiahungduan): Review if we still need the following macros.
|
||||
#include <sys/prctl.h>
|
||||
// Definitions of prctl arguments to set a vma name in Android kernels.
|
||||
#define ANDROID_PR_SET_VMA 0x53564d41
|
||||
#define ANDROID_PR_SET_VMA_ANON_NAME 0
|
||||
#endif
|
||||
|
||||
namespace scudo {
|
||||
|
||||
static void *mmapWrapper(uptr Addr, uptr Size, const char *Name, uptr Flags) {
|
||||
int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
|
||||
int MmapProt;
|
||||
if (Flags & MAP_NOACCESS) {
|
||||
MmapFlags |= MAP_NORESERVE;
|
||||
MmapProt = PROT_NONE;
|
||||
} else {
|
||||
MmapProt = PROT_READ | PROT_WRITE;
|
||||
}
|
||||
#if defined(__aarch64__)
|
||||
#ifndef PROT_MTE
|
||||
#define PROT_MTE 0x20
|
||||
#endif
|
||||
if (Flags & MAP_MEMTAG)
|
||||
MmapProt |= PROT_MTE;
|
||||
#endif
|
||||
if (Addr)
|
||||
MmapFlags |= MAP_FIXED;
|
||||
void *P =
|
||||
mmap(reinterpret_cast<void *>(Addr), Size, MmapProt, MmapFlags, -1, 0);
|
||||
if (P == MAP_FAILED) {
|
||||
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
|
||||
reportMapError(errno == ENOMEM ? Size : 0);
|
||||
return nullptr;
|
||||
}
|
||||
#if SCUDO_ANDROID
|
||||
if (Name)
|
||||
prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
|
||||
#else
|
||||
(void)Name;
|
||||
#endif
|
||||
|
||||
return P;
|
||||
}
|
||||
|
||||
bool MemMapLinux::mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags) {
|
||||
void *P = mmapWrapper(Addr, Size, Name, Flags);
|
||||
if (P == nullptr)
|
||||
return false;
|
||||
|
||||
MapBase = reinterpret_cast<uptr>(P);
|
||||
MapCapacity = Size;
|
||||
return true;
|
||||
}
|
||||
|
||||
void MemMapLinux::unmapImpl(uptr Addr, uptr Size) {
|
||||
// If we unmap all the pages, also set `MapBase` to 0 to indicate an invalid
|
||||
// status.
|
||||
if (Size == MapCapacity) {
|
||||
MapBase = MapCapacity = 0;
|
||||
} else {
|
||||
// This is a partial unmap; if it unmaps the pages from the beginning,
|
||||
// shift `MapBase` to the new base.
|
||||
if (MapBase == Addr)
|
||||
MapBase = Addr + Size;
|
||||
MapCapacity -= Size;
|
||||
}
|
||||
|
||||
if (munmap(reinterpret_cast<void *>(Addr), Size) != 0)
|
||||
reportUnmapError(Addr, Size);
|
||||
}
|
||||
|
||||
bool MemMapLinux::remapImpl(uptr Addr, uptr Size, const char *Name,
|
||||
uptr Flags) {
|
||||
void *P = mmapWrapper(Addr, Size, Name, Flags);
|
||||
if (reinterpret_cast<uptr>(P) != Addr)
|
||||
reportMapError();
|
||||
return true;
|
||||
}
|
||||
|
||||
void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
|
||||
int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
|
||||
if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
|
||||
reportProtectError(Addr, Size, Prot);
|
||||
}
|
||||
|
||||
void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
|
||||
void *Addr = reinterpret_cast<void *>(From);
|
||||
|
||||
while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
|
||||
}
|
||||
}
|
||||
|
||||
bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
|
||||
uptr Flags) {
|
||||
ReservedMemoryLinux::MemMapT MemMap;
|
||||
if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
|
||||
return false;
|
||||
|
||||
MapBase = MemMap.getBase();
|
||||
MapCapacity = MemMap.getCapacity();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ReservedMemoryLinux::releaseImpl() {
|
||||
if (munmap(reinterpret_cast<void *>(getBase()), getCapacity()) != 0)
|
||||
reportUnmapError(getBase(), getCapacity());
|
||||
}
|
||||
|
||||
ReservedMemoryLinux::MemMapT ReservedMemoryLinux::dispatchImpl(uptr Addr,
|
||||
uptr Size) {
|
||||
return ReservedMemoryLinux::MemMapT(Addr, Size);
|
||||
}
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_LINUX
|
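To make the MAP_NOACCESS / setMemoryPermissionImpl() flow concrete, here is what MemMapLinux and ReservedMemoryLinux boil down to at the system-call level. This is an OS-level sketch, not a usage example of the scudo classes themselves.

#include <cstddef>
#include <cstdio>
#include <sys/mman.h>

int main() {
  const size_t Size = 1 << 20; // reserve 1 MiB of address space
  // Reservation: PROT_NONE + MAP_NORESERVE claims addresses, not memory.
  void *Base = mmap(nullptr, Size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (Base == MAP_FAILED)
    return 1;
  // Commit: flipping a sub-range to read/write is what setMemoryPermission()
  // amounts to once a MemMap has been dispatched out of the reservation.
  if (mprotect(Base, 64 << 10, PROT_READ | PROT_WRITE) != 0)
    return 1;
  static_cast<char *>(Base)[0] = 42; // backed by a real page on first touch
  // Give the pages back but keep the addresses, as in
  // releaseAndZeroPagesToOSImpl().
  madvise(Base, 64 << 10, MADV_DONTNEED);
  munmap(Base, Size); // drop the whole reservation, as in releaseImpl()
  std::printf("reserved, committed and released %zu bytes\n", Size);
  return 0;
}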
67
Telegram/ThirdParty/scudo/mem_map_linux.h
vendored
Normal file
|
@ -0,0 +1,67 @@
|
|||
//===-- mem_map_linux.h -----------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_MEM_MAP_LINUX_H_
|
||||
#define SCUDO_MEM_MAP_LINUX_H_
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#if SCUDO_LINUX
|
||||
|
||||
#include "common.h"
|
||||
#include "mem_map_base.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
class MemMapLinux final : public MemMapBase<MemMapLinux> {
|
||||
public:
|
||||
constexpr MemMapLinux() = default;
|
||||
MemMapLinux(uptr Base, uptr Capacity)
|
||||
: MapBase(Base), MapCapacity(Capacity) {}
|
||||
|
||||
// Impls for base functions.
|
||||
bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
|
||||
void unmapImpl(uptr Addr, uptr Size);
|
||||
bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
|
||||
void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
|
||||
void releasePagesToOSImpl(uptr From, uptr Size) {
|
||||
return releaseAndZeroPagesToOSImpl(From, Size);
|
||||
}
|
||||
void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
|
||||
uptr getBaseImpl() { return MapBase; }
|
||||
uptr getCapacityImpl() { return MapCapacity; }
|
||||
|
||||
private:
|
||||
uptr MapBase = 0;
|
||||
uptr MapCapacity = 0;
|
||||
};
|
||||
|
||||
// This will be deprecated once every allocator is supported by each
|
||||
// platform's `MemMap` implementation.
|
||||
class ReservedMemoryLinux final
|
||||
: public ReservedMemory<ReservedMemoryLinux, MemMapLinux> {
|
||||
public:
|
||||
// The following two are the Impls for functions in `MemMapBase`.
|
||||
uptr getBaseImpl() { return MapBase; }
|
||||
uptr getCapacityImpl() { return MapCapacity; }
|
||||
|
||||
// These three are specific to `ReservedMemory`.
|
||||
bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
|
||||
void releaseImpl();
|
||||
MemMapT dispatchImpl(uptr Addr, uptr Size);
|
||||
|
||||
private:
|
||||
uptr MapBase = 0;
|
||||
uptr MapCapacity = 0;
|
||||
};
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_LINUX
|
||||
|
||||
#endif // SCUDO_MEM_MAP_LINUX_H_
|
19
Telegram/ThirdParty/scudo/mutex.h
vendored
|
@ -35,7 +35,7 @@ public:
|
|||
#pragma nounroll
|
||||
#endif
|
||||
for (u8 I = 0U; I < NumberOfTries; I++) {
|
||||
yieldProcessor(NumberOfYields);
|
||||
delayLoop();
|
||||
if (tryLock())
|
||||
return;
|
||||
}
|
||||
|
@ -53,10 +53,23 @@ public:
|
|||
}
|
||||
|
||||
private:
|
||||
void delayLoop() {
|
||||
// The value comes from the average time spent in accessing caches (which
|
||||
// are the fastest operations) so that we are unlikely to wait too long for
|
||||
// fast operations.
|
||||
constexpr u32 SpinTimes = 16;
|
||||
volatile u32 V = 0;
|
||||
for (u32 I = 0; I < SpinTimes; ++I) {
|
||||
u32 Tmp = V + 1;
|
||||
V = Tmp;
|
||||
}
|
||||
}
|
||||
|
||||
void assertHeldImpl();
|
||||
|
||||
static constexpr u8 NumberOfTries = 8U;
|
||||
static constexpr u8 NumberOfYields = 8U;
|
||||
// TODO(chiahungduan): Adapt this value based on scenarios. E.g., primary and
|
||||
// secondary allocator have different allocation times.
|
||||
static constexpr u8 NumberOfTries = 32U;
|
||||
|
||||
#if SCUDO_LINUX
|
||||
atomic_u32 M = {};
|
||||
|
|
2
Telegram/ThirdParty/scudo/options.h
vendored
|
@ -38,7 +38,7 @@ struct Options {
|
|||
}
|
||||
};
|
||||
|
||||
template <typename Config> bool useMemoryTagging(Options Options) {
|
||||
template <typename Config> bool useMemoryTagging(const Options &Options) {
|
||||
return allocatorSupportsMemoryTagging<Config>() &&
|
||||
Options.get(OptionBit::UseMemoryTagging);
|
||||
}
|
||||
|
|
14
Telegram/ThirdParty/scudo/platform.h
vendored
|
@ -63,6 +63,20 @@
|
|||
#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
|
||||
#endif
|
||||
|
||||
// Use smaller table sizes for fuzzing in order to reduce input size.
|
||||
// Trusty just has less available memory.
|
||||
#ifndef SCUDO_SMALL_STACK_DEPOT
|
||||
#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
|
||||
#define SCUDO_SMALL_STACK_DEPOT 1
|
||||
#else
|
||||
#define SCUDO_SMALL_STACK_DEPOT 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef SCUDO_ENABLE_HOOKS
|
||||
#define SCUDO_ENABLE_HOOKS 0
|
||||
#endif
|
||||
|
||||
#ifndef SCUDO_MIN_ALIGNMENT_LOG
|
||||
// We force malloc-type functions to be aligned to std::max_align_t, but there
|
||||
// is no reason why the minimum alignment for all other functions can't be 8
|
||||
|
|
298
Telegram/ThirdParty/scudo/primary32.h
vendored
|
@ -9,6 +9,7 @@
|
|||
#ifndef SCUDO_PRIMARY32_H_
|
||||
#define SCUDO_PRIMARY32_H_
|
||||
|
||||
#include "allocator_common.h"
|
||||
#include "bytemap.h"
|
||||
#include "common.h"
|
||||
#include "list.h"
|
||||
|
@ -53,12 +54,15 @@ public:
|
|||
"");
|
||||
typedef SizeClassAllocator32<Config> ThisT;
|
||||
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
|
||||
typedef typename CacheT::TransferBatch TransferBatch;
|
||||
typedef typename CacheT::BatchGroup BatchGroup;
|
||||
typedef TransferBatch<ThisT> TransferBatchT;
|
||||
typedef BatchGroup<ThisT> BatchGroupT;
|
||||
|
||||
static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
|
||||
"BatchGroupT uses the same class size as TransferBatchT");
|
||||
|
||||
static uptr getSizeByClassId(uptr ClassId) {
|
||||
return (ClassId == SizeClassMap::BatchClassId)
|
||||
? sizeof(TransferBatch)
|
||||
? sizeof(TransferBatchT)
|
||||
: SizeClassMap::getSizeByClassId(ClassId);
|
||||
}
|
||||
|
||||
|
@ -126,7 +130,7 @@ public:
|
|||
SizeClassInfo *Sci = getSizeClassInfo(I);
|
||||
ScopedLock L1(Sci->Mutex);
|
||||
uptr TotalBlocks = 0;
|
||||
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
|
||||
for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
|
||||
// `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
|
||||
BatchClassUsedInFreeLists += BG.Batches.size() + 1;
|
||||
for (const auto &It : BG.Batches)
|
||||
|
@ -141,7 +145,7 @@ public:
|
|||
SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
|
||||
ScopedLock L1(Sci->Mutex);
|
||||
uptr TotalBlocks = 0;
|
||||
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
|
||||
for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
|
||||
if (LIKELY(!BG.Batches.empty())) {
|
||||
for (const auto &It : BG.Batches)
|
||||
TotalBlocks += It.getCount();
|
||||
|
@ -187,11 +191,30 @@ public:
|
|||
return BlockSize > PageSize;
|
||||
}
|
||||
|
||||
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
|
||||
// Note that `MaxBlockCount` will be used when we support an arbitrary block
|
||||
// count. For now it is the same as the number of blocks stored in the
|
||||
// `TransferBatch`.
|
||||
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
|
||||
UNUSED const u16 MaxBlockCount) {
|
||||
TransferBatchT *B = popBatch(C, ClassId);
|
||||
if (!B)
|
||||
return 0;
|
||||
|
||||
const u16 Count = B->getCount();
|
||||
DCHECK_GT(Count, 0U);
|
||||
B->moveToArray(ToArray);
|
||||
|
||||
if (ClassId != SizeClassMap::BatchClassId)
|
||||
C->deallocate(SizeClassMap::BatchClassId, B);
|
||||
|
||||
return Count;
|
||||
}
|
||||
|
||||
TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
|
||||
DCHECK_LT(ClassId, NumClasses);
|
||||
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
|
||||
ScopedLock L(Sci->Mutex);
|
||||
TransferBatch *B = popBatchImpl(C, ClassId, Sci);
|
||||
TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
|
||||
if (UNLIKELY(!B)) {
|
||||
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
|
||||
return nullptr;
|
||||
|
@ -311,6 +334,18 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
void getFragmentationInfo(ScopedString *Str) {
|
||||
Str->append(
|
||||
"Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
|
||||
getPageSizeCached());
|
||||
|
||||
for (uptr I = 1; I < NumClasses; I++) {
|
||||
SizeClassInfo *Sci = getSizeClassInfo(I);
|
||||
ScopedLock L(Sci->Mutex);
|
||||
getSizeClassFragmentationInfo(Sci, I, Str);
|
||||
}
|
||||
}
|
||||
|
||||
bool setOption(Option O, sptr Value) {
|
||||
if (O == Option::ReleaseInterval) {
|
||||
const s32 Interval = Max(Min(static_cast<s32>(Value),
|
||||
|
@ -369,7 +404,7 @@ private:
|
|||
};
|
||||
|
||||
struct BlocksInfo {
|
||||
SinglyLinkedList<BatchGroup> BlockList = {};
|
||||
SinglyLinkedList<BatchGroupT> BlockList = {};
|
||||
uptr PoppedBlocks = 0;
|
||||
uptr PushedBlocks = 0;
|
||||
};
|
||||
|
@ -493,11 +528,11 @@ private:
|
|||
// reusable and don't need additional space for them.
|
||||
|
||||
Sci->FreeListInfo.PushedBlocks += Size;
|
||||
BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
|
||||
|
||||
if (BG == nullptr) {
|
||||
// Construct `BatchGroup` on the last element.
|
||||
BG = reinterpret_cast<BatchGroup *>(
|
||||
BG = reinterpret_cast<BatchGroupT *>(
|
||||
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
|
||||
--Size;
|
||||
BG->Batches.clear();
|
||||
|
@ -508,8 +543,8 @@ private:
|
|||
// from `CreateGroup` in `pushBlocksImpl`
|
||||
BG->PushedBlocks = 1;
|
||||
BG->BytesInBGAtLastCheckpoint = 0;
|
||||
BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
|
||||
getSizeByClassId(SizeClassMap::BatchClassId));
|
||||
BG->MaxCachedPerBatch =
|
||||
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
|
||||
|
||||
Sci->FreeListInfo.BlockList.push_front(BG);
|
||||
}
|
||||
|
@ -522,7 +557,7 @@ private:
|
|||
// 2. Only 1 block is pushed when the freelist is empty.
|
||||
if (BG->Batches.empty()) {
|
||||
// Construct the `TransferBatch` on the last element.
|
||||
TransferBatch *TB = reinterpret_cast<TransferBatch *>(
|
||||
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
|
||||
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
|
||||
TB->clear();
|
||||
// As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
|
||||
|
@ -537,14 +572,14 @@ private:
|
|||
BG->Batches.push_front(TB);
|
||||
}
|
||||
|
||||
TransferBatch *CurBatch = BG->Batches.front();
|
||||
TransferBatchT *CurBatch = BG->Batches.front();
|
||||
DCHECK_NE(CurBatch, nullptr);
|
||||
|
||||
for (u32 I = 0; I < Size;) {
|
||||
u16 UnusedSlots =
|
||||
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
|
||||
if (UnusedSlots == 0) {
|
||||
CurBatch = reinterpret_cast<TransferBatch *>(
|
||||
CurBatch = reinterpret_cast<TransferBatchT *>(
|
||||
decompactPtr(SizeClassMap::BatchClassId, Array[I]));
|
||||
CurBatch->clear();
|
||||
// Self-contained
|
||||
|
@ -588,24 +623,25 @@ private:
|
|||
DCHECK_GT(Size, 0U);
|
||||
|
||||
auto CreateGroup = [&](uptr CompactPtrGroupBase) {
|
||||
BatchGroup *BG = C->createGroup();
|
||||
BatchGroupT *BG =
|
||||
reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
|
||||
BG->Batches.clear();
|
||||
TransferBatch *TB = C->createBatch(ClassId, nullptr);
|
||||
TransferBatchT *TB =
|
||||
reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
|
||||
TB->clear();
|
||||
|
||||
BG->CompactPtrGroupBase = CompactPtrGroupBase;
|
||||
BG->Batches.push_front(TB);
|
||||
BG->PushedBlocks = 0;
|
||||
BG->BytesInBGAtLastCheckpoint = 0;
|
||||
BG->MaxCachedPerBatch =
|
||||
TransferBatch::getMaxCached(getSizeByClassId(ClassId));
|
||||
BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
|
||||
|
||||
return BG;
|
||||
};
|
||||
|
||||
auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
|
||||
SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
|
||||
TransferBatch *CurBatch = Batches.front();
|
||||
auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
|
||||
SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
|
||||
TransferBatchT *CurBatch = Batches.front();
|
||||
DCHECK_NE(CurBatch, nullptr);
|
||||
|
||||
for (u32 I = 0; I < Size;) {
|
||||
|
@ -613,9 +649,8 @@ private:
|
|||
u16 UnusedSlots =
|
||||
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
|
||||
if (UnusedSlots == 0) {
|
||||
CurBatch = C->createBatch(
|
||||
ClassId,
|
||||
reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
|
||||
CurBatch =
|
||||
reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
|
||||
CurBatch->clear();
|
||||
Batches.push_front(CurBatch);
|
||||
UnusedSlots = BG->MaxCachedPerBatch;
|
||||
|
@ -630,11 +665,11 @@ private:
|
|||
};
|
||||
|
||||
Sci->FreeListInfo.PushedBlocks += Size;
|
||||
BatchGroup *Cur = Sci->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
|
||||
|
||||
// In the following, `Cur` always points to the BatchGroup for blocks that
|
||||
// will be pushed next. `Prev` is the element right before `Cur`.
|
||||
BatchGroup *Prev = nullptr;
|
||||
BatchGroupT *Prev = nullptr;
|
||||
|
||||
while (Cur != nullptr &&
|
||||
compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
|
||||
|
@ -695,22 +730,22 @@ private:
|
|||
// group id will be considered first.
|
||||
//
|
||||
// The region mutex needs to be held while calling this method.
|
||||
TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
|
||||
TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
|
||||
REQUIRES(Sci->Mutex) {
|
||||
if (Sci->FreeListInfo.BlockList.empty())
|
||||
return nullptr;
|
||||
|
||||
SinglyLinkedList<TransferBatch> &Batches =
|
||||
SinglyLinkedList<TransferBatchT> &Batches =
|
||||
Sci->FreeListInfo.BlockList.front()->Batches;
|
||||
|
||||
if (Batches.empty()) {
|
||||
DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
|
||||
BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
|
||||
Sci->FreeListInfo.BlockList.pop_front();
|
||||
|
||||
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
|
||||
// `TransferBatch` with single block.
|
||||
TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
|
||||
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
|
||||
TB->clear();
|
||||
TB->add(
|
||||
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
|
||||
|
@ -718,13 +753,13 @@ private:
|
|||
return TB;
|
||||
}
|
||||
|
||||
TransferBatch *B = Batches.front();
|
||||
TransferBatchT *B = Batches.front();
|
||||
Batches.pop_front();
|
||||
DCHECK_NE(B, nullptr);
|
||||
DCHECK_GT(B->getCount(), 0U);
|
||||
|
||||
if (Batches.empty()) {
|
||||
BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
|
||||
Sci->FreeListInfo.BlockList.pop_front();
|
||||
|
||||
// We don't keep BatchGroup with zero blocks to avoid empty-checking while
|
||||
|
@ -763,7 +798,7 @@ private:
|
|||
}
|
||||
|
||||
const uptr Size = getSizeByClassId(ClassId);
|
||||
const u16 MaxCount = TransferBatch::getMaxCached(Size);
|
||||
const u16 MaxCount = CacheT::getMaxCached(Size);
|
||||
DCHECK_GT(MaxCount, 0U);
|
||||
// The maximum number of blocks we should carve in the region is dictated
|
||||
// by the maximum number of batches we want to fill, and the amount of
|
||||
|
@ -776,7 +811,7 @@ private:
|
|||
DCHECK_GT(NumberOfBlocks, 0U);
|
||||
|
||||
constexpr u32 ShuffleArraySize =
|
||||
MaxNumBatches * TransferBatch::MaxNumCached;
|
||||
MaxNumBatches * TransferBatchT::MaxNumCached;
|
||||
// Fill the transfer batches and put them in the size-class freelist. We
|
||||
// need to randomize the blocks for security purposes, so we first fill a
|
||||
// local array that we then shuffle before populating the batches.
|
||||
|
@ -856,11 +891,60 @@ private:
|
|||
PushedBytesDelta >> 10);
|
||||
}
|
||||
|
||||
void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
|
||||
ScopedString *Str) REQUIRES(Sci->Mutex) {
|
||||
const uptr BlockSize = getSizeByClassId(ClassId);
|
||||
const uptr First = Sci->MinRegionIndex;
|
||||
const uptr Last = Sci->MaxRegionIndex;
|
||||
const uptr Base = First * RegionSize;
|
||||
const uptr NumberOfRegions = Last - First + 1U;
|
||||
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
|
||||
ScopedLock L(ByteMapMutex);
|
||||
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
|
||||
};
|
||||
|
||||
FragmentationRecorder Recorder;
|
||||
if (!Sci->FreeListInfo.BlockList.empty()) {
|
||||
PageReleaseContext Context =
|
||||
markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
|
||||
ReleaseToOS::ForceAll);
|
||||
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
|
||||
}
|
||||
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
|
||||
const uptr InUseBlocks =
|
||||
Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
|
||||
uptr AllocatedPagesCount = 0;
|
||||
if (TotalBlocks != 0U) {
|
||||
for (uptr I = 0; I < NumberOfRegions; ++I) {
|
||||
if (SkipRegion(I))
|
||||
continue;
|
||||
AllocatedPagesCount += RegionSize / PageSize;
|
||||
}
|
||||
|
||||
DCHECK_NE(AllocatedPagesCount, 0U);
|
||||
}
|
||||
|
||||
DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
|
||||
const uptr InUsePages =
|
||||
AllocatedPagesCount - Recorder.getReleasedPagesCount();
|
||||
const uptr InUseBytes = InUsePages * PageSize;
|
||||
|
||||
uptr Integral;
|
||||
uptr Fractional;
|
||||
computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
|
||||
&Fractional);
|
||||
Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
|
||||
"pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
|
||||
ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
|
||||
AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
|
||||
}
|
||||
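A quick worked example of the utilization line appended above, with made-up numbers:

  BlockSize = 32, InUseBlocks = 1000  ->  in-use bytes = 32 * 1000 = 32000
  PageSize = 4096, InUsePages = 10    ->  in-use pages = 10 * 4096 = 40960 bytes
  util = 32000 * 100 / 40960          ->  78.12%

so roughly a fifth of the pages this size class keeps resident is fragmentation that even the forced release performed here cannot hand back.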
|
||||
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
|
||||
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
|
||||
REQUIRES(Sci->Mutex) {
|
||||
const uptr BlockSize = getSizeByClassId(ClassId);
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
|
||||
DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
|
||||
const uptr BytesInFreeList =
|
||||
|
@ -871,6 +955,60 @@ private:
|
|||
if (UNLIKELY(BytesInFreeList == 0))
|
||||
return 0;
|
||||
|
||||
// ====================================================================== //
|
||||
// 1. Check if we have enough free blocks and if it's worth doing a page
|
||||
// release.
|
||||
// ====================================================================== //
|
||||
if (ReleaseType != ReleaseToOS::ForceAll &&
|
||||
!hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
|
||||
ReleaseType)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const uptr First = Sci->MinRegionIndex;
|
||||
const uptr Last = Sci->MaxRegionIndex;
|
||||
DCHECK_NE(Last, 0U);
|
||||
DCHECK_LE(First, Last);
|
||||
uptr TotalReleasedBytes = 0;
|
||||
const uptr Base = First * RegionSize;
|
||||
const uptr NumberOfRegions = Last - First + 1U;
|
||||
|
||||
// ==================================================================== //
|
||||
// 2. Mark the free blocks and we can tell which pages are in-use by
|
||||
// querying `PageReleaseContext`.
|
||||
// ==================================================================== //
|
||||
PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
|
||||
NumberOfRegions, ReleaseType);
|
||||
if (!Context.hasBlockMarked())
|
||||
return 0;
|
||||
|
||||
// ==================================================================== //
|
||||
// 3. Release the unused physical pages back to the OS.
|
||||
// ==================================================================== //
|
||||
ReleaseRecorder Recorder(Base);
|
||||
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
|
||||
ScopedLock L(ByteMapMutex);
|
||||
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
|
||||
};
|
||||
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
|
||||
|
||||
if (Recorder.getReleasedRangesCount() > 0) {
|
||||
Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
|
||||
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
|
||||
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
|
||||
TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
|
||||
}
|
||||
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
|
||||
|
||||
return TotalReleasedBytes;
|
||||
}
|
||||
|
||||
bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
|
||||
uptr BytesInFreeList, ReleaseToOS ReleaseType)
|
||||
REQUIRES(Sci->Mutex) {
|
||||
DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
|
||||
if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
|
||||
Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
|
||||
|
||||
|
@ -892,22 +1030,20 @@ private:
|
|||
// (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
|
||||
const uptr PushedBytesDelta =
|
||||
BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
|
||||
if (PushedBytesDelta < PageSize && ReleaseType != ReleaseToOS::ForceAll)
|
||||
return 0;
|
||||
if (PushedBytesDelta < PageSize)
|
||||
return false;
|
||||
|
||||
const bool CheckDensity =
|
||||
isSmallBlock(BlockSize) && ReleaseType != ReleaseToOS::ForceAll;
|
||||
// Releasing smaller blocks is expensive, so we want to make sure that a
|
||||
// significant amount of bytes are free, and that there has been a good
|
||||
// amount of batches pushed to the freelist before attempting to release.
|
||||
if (CheckDensity && ReleaseType == ReleaseToOS::Normal)
|
||||
if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
|
||||
if (PushedBytesDelta < Sci->AllocatedUser / 16U)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
if (ReleaseType == ReleaseToOS::Normal) {
|
||||
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
|
||||
if (IntervalMs < 0)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
// The constant 8 here is selected from profiling some apps and the number
|
||||
// of unreleased pages in the large size classes is around 16 pages or
|
||||
|
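Putting numbers on the ReleaseToOS::Normal gates above (made-up figures): with AllocatedUser = 1 MiB for a small size class, PushedBytesDelta must reach at least one page and at least AllocatedUser / 16 = 64 KiB of newly freed bytes, and the ReleaseToOsIntervalMs timer must have expired, before a regular release attempt goes ahead. ForceAll bypasses this function entirely, which is what the new fragmentation reporting path relies on.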
@ -920,30 +1056,31 @@ private:
|
|||
static_cast<u64>(IntervalMs) * 1000000 >
|
||||
getMonotonicTimeFast()) {
|
||||
// Memory was returned recently.
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} // if (ReleaseType == ReleaseToOS::Normal)
|
||||
|
||||
const uptr First = Sci->MinRegionIndex;
|
||||
const uptr Last = Sci->MaxRegionIndex;
|
||||
DCHECK_NE(Last, 0U);
|
||||
DCHECK_LE(First, Last);
|
||||
uptr TotalReleasedBytes = 0;
|
||||
const uptr Base = First * RegionSize;
|
||||
const uptr NumberOfRegions = Last - First + 1U;
|
||||
const uptr GroupSize = (1U << GroupSizeLog);
|
||||
return true;
|
||||
}
|
||||
|
||||
PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
|
||||
const uptr BlockSize, const uptr Base,
|
||||
const uptr NumberOfRegions,
|
||||
ReleaseToOS ReleaseType)
|
||||
REQUIRES(Sci->Mutex) {
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
const uptr GroupSize = (1UL << GroupSizeLog);
|
||||
const uptr CurGroupBase =
|
||||
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
|
||||
|
||||
ReleaseRecorder Recorder(Base);
|
||||
PageReleaseContext Context(BlockSize, NumberOfRegions,
|
||||
/*ReleaseSize=*/RegionSize);
|
||||
|
||||
auto DecompactPtr = [](CompactPtrT CompactPtr) {
|
||||
return reinterpret_cast<uptr>(CompactPtr);
|
||||
};
|
||||
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
|
||||
for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
|
||||
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
|
||||
// The `GroupSize` may not be divided by `BlockSize`, which means there is
|
||||
// an unused space at the end of Region. Exclude that space to avoid
|
||||
|
@ -960,25 +1097,27 @@ private:
|
|||
BG.Batches.front()->getCount();
|
||||
const uptr BytesInBG = NumBlocks * BlockSize;
|
||||
|
||||
if (ReleaseType != ReleaseToOS::ForceAll &&
|
||||
BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
|
||||
BG.BytesInBGAtLastCheckpoint = BytesInBG;
|
||||
continue;
|
||||
}
|
||||
const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
|
||||
if (ReleaseType != ReleaseToOS::ForceAll && PushedBytesDelta < PageSize)
|
||||
continue;
|
||||
if (ReleaseType != ReleaseToOS::ForceAll) {
|
||||
if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
|
||||
BG.BytesInBGAtLastCheckpoint = BytesInBG;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Given the randomness property, we try to release the pages only if the
|
||||
// bytes used by free blocks exceed certain proportion of allocated
|
||||
// spaces.
|
||||
if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
|
||||
(100U - 1U - BlockSize / 16U)) {
|
||||
continue;
|
||||
const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
|
||||
if (PushedBytesDelta < PageSize)
|
||||
continue;
|
||||
|
||||
// Given the randomness property, we try to release the pages only if
|
||||
// the bytes used by free blocks exceed a certain proportion of allocated
|
||||
// spaces.
|
||||
if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
|
||||
(100U - 1U - BlockSize / 16U)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Consider updating this after page release if `ReleaseRecorder`
|
||||
// can tell the releasd bytes in each group.
|
||||
// can tell the released bytes in each group.
|
||||
BG.BytesInBGAtLastCheckpoint = BytesInBG;
|
||||
|
||||
const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
|
||||
|
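For scale, the density gate above with made-up numbers: a 64-byte class has a threshold of 100 - 1 - 64 / 16 = 95, so one of its groups is only marked for a regular release once at least 95% of the group's allocated bytes sit on the freelist; a 512-byte class gets 100 - 1 - 512 / 16 = 67, i.e. the bar loosens as the block size grows. ForceAll skips the gate altogether.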
@ -1006,27 +1145,10 @@ private:
|
|||
// We may not be able to do the page release in a rare case that we may
|
||||
// fail on PageMap allocation.
|
||||
if (UNLIKELY(!Context.hasBlockMarked()))
|
||||
return 0;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!Context.hasBlockMarked())
|
||||
return 0;
|
||||
|
||||
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
|
||||
ScopedLock L(ByteMapMutex);
|
||||
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
|
||||
};
|
||||
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
|
||||
|
||||
if (Recorder.getReleasedRangesCount() > 0) {
|
||||
Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
|
||||
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
|
||||
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
|
||||
TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
|
||||
}
|
||||
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
|
||||
|
||||
return TotalReleasedBytes;
|
||||
return Context;
|
||||
}
|
||||
|
||||
SizeClassInfo SizeClassInfoArray[NumClasses] = {};
|
||||
|
|
551
Telegram/ThirdParty/scudo/primary64.h
vendored
|
@ -9,6 +9,7 @@
|
|||
#ifndef SCUDO_PRIMARY64_H_
|
||||
#define SCUDO_PRIMARY64_H_
|
||||
|
||||
#include "allocator_common.h"
|
||||
#include "bytemap.h"
|
||||
#include "common.h"
|
||||
#include "list.h"
|
||||
|
@ -21,6 +22,8 @@
|
|||
#include "string_utils.h"
|
||||
#include "thread_annotations.h"
|
||||
|
||||
#include "condition_variable.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
|
||||
|
@ -47,27 +50,39 @@ template <typename Config> class SizeClassAllocator64 {
|
|||
public:
|
||||
typedef typename Config::Primary::CompactPtrT CompactPtrT;
|
||||
typedef typename Config::Primary::SizeClassMap SizeClassMap;
|
||||
typedef typename ConditionVariableState<
|
||||
typename Config::Primary>::ConditionVariableT ConditionVariableT;
|
||||
static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
|
||||
static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
|
||||
static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
|
||||
static_assert(RegionSizeLog >= GroupSizeLog,
|
||||
"Group size shouldn't be greater than the region size");
|
||||
static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
|
||||
typedef SizeClassAllocator64<Config> ThisT;
|
||||
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
|
||||
typedef typename CacheT::TransferBatch TransferBatch;
|
||||
typedef typename CacheT::BatchGroup BatchGroup;
|
||||
typedef TransferBatch<ThisT> TransferBatchT;
|
||||
typedef BatchGroup<ThisT> BatchGroupT;
|
||||
|
||||
static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
|
||||
"BatchGroupT uses the same class size as TransferBatchT");
|
||||
|
||||
static uptr getSizeByClassId(uptr ClassId) {
|
||||
return (ClassId == SizeClassMap::BatchClassId)
|
||||
? roundUp(sizeof(TransferBatch), 1U << CompactPtrScale)
|
||||
? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
|
||||
: SizeClassMap::getSizeByClassId(ClassId);
|
||||
}
|
||||
|
||||
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
|
||||
|
||||
static bool conditionVariableEnabled() {
|
||||
return ConditionVariableState<typename Config::Primary>::enabled();
|
||||
}
|
||||
|
||||
void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
|
||||
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
|
||||
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
const uptr GroupSize = (1U << GroupSizeLog);
|
||||
const uptr GroupSize = (1UL << GroupSizeLog);
|
||||
const uptr PagesInGroup = GroupSize / PageSize;
|
||||
const uptr MinSizeClass = getSizeByClassId(1);
|
||||
// When trying to release pages back to memory, visiting smaller size
|
||||
|
@ -117,13 +132,13 @@ public:
|
|||
|
||||
for (uptr I = 0; I < NumClasses; I++) {
|
||||
RegionInfo *Region = getRegionInfo(I);
|
||||
|
||||
// The actual start of a region is offset by a random number of pages
|
||||
// when PrimaryEnableRandomOffset is set.
|
||||
Region->RegionBeg =
|
||||
(PrimaryBase + (I << Config::Primary::RegionSizeLog)) +
|
||||
(Config::Primary::EnableRandomOffset
|
||||
? ((getRandomModN(&Seed, 16) + 1) * PageSize)
|
||||
: 0);
|
||||
Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
|
||||
(Config::Primary::EnableRandomOffset
|
||||
? ((getRandomModN(&Seed, 16) + 1) * PageSize)
|
||||
: 0);
|
||||
Region->RandState = getRandomU32(&Seed);
|
||||
// Releasing small blocks is expensive, set a higher threshold to avoid
|
||||
// frequent page releases.
|
||||
|
@ -134,11 +149,16 @@ public:
|
|||
Region->ReleaseInfo.LastReleaseAtNs = Time;
|
||||
|
||||
Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
|
||||
PrimaryBase + (I << Config::Primary::RegionSizeLog), RegionSize);
|
||||
PrimaryBase + (I << RegionSizeLog), RegionSize);
|
||||
CHECK(Region->MemMapInfo.MemMap.isAllocated());
|
||||
}
|
||||
shuffle(RegionInfoArray, NumClasses, &Seed);
|
||||
|
||||
// The binding should be done after region shuffling so that it won't bind
|
||||
// the FLLock from the wrong region.
|
||||
for (uptr I = 0; I < NumClasses; I++)
|
||||
getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
|
||||
|
||||
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
|
||||
}
|
||||
|
||||
|
@ -165,7 +185,7 @@ public:
|
|||
ScopedLock FL(Region->FLLock);
|
||||
const uptr BlockSize = getSizeByClassId(I);
|
||||
uptr TotalBlocks = 0;
|
||||
for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
|
||||
for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
|
||||
// `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
|
||||
BatchClassUsedInFreeLists += BG.Batches.size() + 1;
|
||||
for (const auto &It : BG.Batches)
|
||||
|
@ -182,7 +202,7 @@ public:
|
|||
ScopedLock FL(Region->FLLock);
|
||||
const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
|
||||
uptr TotalBlocks = 0;
|
||||
for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
|
||||
for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
|
||||
if (LIKELY(!BG.Batches.empty())) {
|
||||
for (const auto &It : BG.Batches)
|
||||
TotalBlocks += It.getCount();
|
||||
|
@ -201,51 +221,64 @@ public:
|
|||
DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
|
||||
}
|
||||
|
||||
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
|
||||
// Note that `MaxBlockCount` will be used when we support an arbitrary block
|
||||
// count. For now it is the same as the number of blocks stored in the
|
||||
// `TransferBatch`.
|
||||
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
|
||||
UNUSED const u16 MaxBlockCount) {
|
||||
TransferBatchT *B = popBatch(C, ClassId);
|
||||
if (!B)
|
||||
return 0;
|
||||
|
||||
const u16 Count = B->getCount();
|
||||
DCHECK_GT(Count, 0U);
|
||||
B->moveToArray(ToArray);
|
||||
|
||||
if (ClassId != SizeClassMap::BatchClassId)
|
||||
C->deallocate(SizeClassMap::BatchClassId, B);
|
||||
|
||||
return Count;
|
||||
}
|
||||
|
||||
TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
|
||||
DCHECK_LT(ClassId, NumClasses);
|
||||
RegionInfo *Region = getRegionInfo(ClassId);
|
||||
|
||||
{
|
||||
ScopedLock L(Region->FLLock);
|
||||
TransferBatch *B = popBatchImpl(C, ClassId, Region);
|
||||
TransferBatchT *B = popBatchImpl(C, ClassId, Region);
|
||||
if (LIKELY(B))
|
||||
return B;
|
||||
}
|
||||
|
||||
bool PrintStats = false;
|
||||
TransferBatch *B = nullptr;
|
||||
bool ReportRegionExhausted = false;
|
||||
TransferBatchT *B = nullptr;
|
||||
|
||||
while (true) {
|
||||
// When two threads compete for `Region->MMLock`, we only want one of them
|
||||
// does the populateFreeListAndPopBatch(). To avoid both of them doing
|
||||
// that, always check the freelist before mapping new pages.
|
||||
//
|
||||
// TODO(chiahungduan): Use a condition variable so that we don't need to
|
||||
// hold `Region->MMLock` here.
|
||||
ScopedLock ML(Region->MMLock);
|
||||
{
|
||||
ScopedLock FL(Region->FLLock);
|
||||
B = popBatchImpl(C, ClassId, Region);
|
||||
if (LIKELY(B))
|
||||
return B;
|
||||
if (conditionVariableEnabled()) {
|
||||
B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
|
||||
} else {
|
||||
while (true) {
|
||||
// When two threads compete for `Region->MMLock`, we only want one of
|
||||
// them to call populateFreeListAndPopBatch(). To avoid both of them
|
||||
// doing that, always check the freelist before mapping new pages.
|
||||
ScopedLock ML(Region->MMLock);
|
||||
{
|
||||
ScopedLock FL(Region->FLLock);
|
||||
if ((B = popBatchImpl(C, ClassId, Region)))
|
||||
break;
|
||||
}
|
||||
|
||||
const bool RegionIsExhausted = Region->Exhausted;
|
||||
if (!RegionIsExhausted)
|
||||
B = populateFreeListAndPopBatch(C, ClassId, Region);
|
||||
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
|
||||
break;
|
||||
}
|
||||
|
||||
const bool RegionIsExhausted = Region->Exhausted;
|
||||
if (!RegionIsExhausted)
|
||||
B = populateFreeListAndPopBatch(C, ClassId, Region);
|
||||
PrintStats = !RegionIsExhausted && Region->Exhausted;
|
||||
break;
|
||||
}
|
||||
|
||||
// Note that `getStats()` requires locking each region so we can't call it
|
||||
// while locking the Region->Mutex in the above.
|
||||
if (UNLIKELY(PrintStats)) {
|
||||
ScopedString Str;
|
||||
getStats(&Str);
|
||||
Str.append(
|
||||
"Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
|
||||
RegionSize >> 20, getSizeByClassId(ClassId));
|
||||
Str.output();
|
||||
if (UNLIKELY(ReportRegionExhausted)) {
|
||||
Printf("Can't populate more pages for size class %zu.\n",
|
||||
getSizeByClassId(ClassId));
|
||||
|
||||
// Theoretically, BatchClass shouldn't be used up. Abort immediately when
|
||||
// it happens.
|
||||
|
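When the condition variable is not enabled, the loop above is a classic check-lock-check: consult the freelist under FLLock, and only a thread that misses takes the heavier MMLock, re-checks, and then populates. The sketch below captures that shape with ordinary std::mutex and a toy freelist; the real code uses scudo's HybridMutex, TransferBatches and an exhaustion flag, none of which are modelled here.

#include <mutex>
#include <optional>
#include <vector>

struct ToyRegion {
  std::mutex MMLock; // guards population, acquired before FLLock
  std::mutex FLLock; // guards Freelist
  std::vector<int> Freelist;

  std::optional<int> popBlock() {
    if (auto B = tryPop()) // fast path: freelist only
      return B;
    std::lock_guard<std::mutex> MM(MMLock); // one populator at a time
    if (auto B = tryPop()) // someone else refilled while we waited
      return B;
    populate(); // expensive: map new pages, carve them into blocks
    return tryPop();
  }

private:
  std::optional<int> tryPop() {
    std::lock_guard<std::mutex> FL(FLLock);
    if (Freelist.empty())
      return std::nullopt;
    const int B = Freelist.back();
    Freelist.pop_back();
    return B;
  }

  void populate() {
    std::vector<int> NewBlocks;
    for (int I = 0; I < 64; ++I) // stand-in for populateFreeListAndPopBatch()
      NewBlocks.push_back(I);
    std::lock_guard<std::mutex> FL(FLLock);
    Freelist.insert(Freelist.end(), NewBlocks.begin(), NewBlocks.end());
  }
};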
@ -265,30 +298,36 @@ public:
|
|||
if (ClassId == SizeClassMap::BatchClassId) {
|
||||
ScopedLock L(Region->FLLock);
|
||||
pushBatchClassBlocks(Region, Array, Size);
|
||||
if (conditionVariableEnabled())
|
||||
Region->FLLockCV.notifyAll(Region->FLLock);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO(chiahungduan): Consider not doing grouping if the group size is not
|
||||
// greater than the block size with a certain scale.
|
||||
|
||||
// Sort the blocks so that blocks belonging to the same group can be pushed
|
||||
// together.
|
||||
bool SameGroup = true;
|
||||
for (u32 I = 1; I < Size; ++I) {
|
||||
if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
|
||||
SameGroup = false;
|
||||
CompactPtrT Cur = Array[I];
|
||||
u32 J = I;
|
||||
while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
|
||||
Array[J] = Array[J - 1];
|
||||
--J;
|
||||
if (GroupSizeLog < RegionSizeLog) {
|
||||
// Sort the blocks so that blocks belonging to the same group can be
|
||||
// pushed together.
|
||||
for (u32 I = 1; I < Size; ++I) {
|
||||
if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
|
||||
SameGroup = false;
|
||||
CompactPtrT Cur = Array[I];
|
||||
u32 J = I;
|
||||
while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
|
||||
Array[J] = Array[J - 1];
|
||||
--J;
|
||||
}
|
||||
Array[J] = Cur;
|
||||
}
|
||||
Array[J] = Cur;
|
||||
}
|
||||
|
||||
{
|
||||
ScopedLock L(Region->FLLock);
|
||||
pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
|
||||
if (conditionVariableEnabled())
|
||||
Region->FLLockCV.notifyAll(Region->FLLock);
|
||||
}
|
||||
}
|
||||
|
||||
|
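The insertion sort in pushBlocks() above keys on compactPtrGroup(), i.e. on which GroupSizeLog-aligned window an address falls into, so that blocks of the same group end up adjacent and pushBlocksImpl() can hand each group over in one run. A self-contained sketch of that ordering, assuming 256 KiB groups and raw 64-bit addresses in place of compact pointers:

#include <cstdint>
#include <cstdio>

static constexpr uint64_t GroupSizeLog = 18; // assumed 256 KiB groups
static uint64_t groupOf(uint64_t Ptr) { return Ptr >> GroupSizeLog; }

static void sortByGroup(uint64_t *Array, uint32_t Size) {
  // Insertion sort: cheap for the small, mostly pre-sorted arrays a thread
  // cache drains at a time.
  for (uint32_t I = 1; I < Size; ++I) {
    const uint64_t Cur = Array[I];
    uint32_t J = I;
    while (J > 0 && groupOf(Cur) < groupOf(Array[J - 1])) {
      Array[J] = Array[J - 1];
      --J;
    }
    Array[J] = Cur;
  }
}

int main() {
  uint64_t Blocks[] = {0x840010, 0x400020, 0x840100, 0x400080};
  sortByGroup(Blocks, 4);
  for (const uint64_t B : Blocks)
    std::printf("group %llu block 0x%llx\n",
                static_cast<unsigned long long>(groupOf(B)),
                static_cast<unsigned long long>(B));
  return 0;
}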
@ -363,6 +402,18 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
void getFragmentationInfo(ScopedString *Str) {
|
||||
Str->append(
|
||||
"Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
|
||||
getPageSizeCached());
|
||||
|
||||
for (uptr I = 1; I < NumClasses; I++) {
|
||||
RegionInfo *Region = getRegionInfo(I);
|
||||
ScopedLock L(Region->MMLock);
|
||||
getRegionFragmentationInfo(Region, I, Str);
|
||||
}
|
||||
}
|
||||
|
||||
bool setOption(Option O, sptr Value) {
|
||||
if (O == Option::ReleaseInterval) {
|
||||
const s32 Interval = Max(Min(static_cast<s32>(Value),
|
||||
|
@ -477,7 +528,7 @@ public:
|
|||
AtomicOptions Options;
|
||||
|
||||
private:
|
||||
static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
|
||||
static const uptr RegionSize = 1UL << RegionSizeLog;
|
||||
static const uptr NumClasses = SizeClassMap::NumClasses;
|
||||
static const uptr PrimarySize = RegionSize * NumClasses;
|
||||
|
||||
|
@ -493,7 +544,7 @@ private:
|
|||
};
|
||||
|
||||
struct BlocksInfo {
|
||||
SinglyLinkedList<BatchGroup> BlockList = {};
|
||||
SinglyLinkedList<BatchGroupT> BlockList = {};
|
||||
uptr PoppedBlocks = 0;
|
||||
uptr PushedBlocks = 0;
|
||||
};
|
||||
|
@ -509,6 +560,7 @@ private:
|
|||
struct UnpaddedRegionInfo {
|
||||
// Mutex for operations on freelist
|
||||
HybridMutex FLLock;
|
||||
ConditionVariableT FLLockCV GUARDED_BY(FLLock);
|
||||
// Mutex for memmap operations
|
||||
HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
|
||||
// `RegionBeg` is initialized before thread creation and won't be changed.
|
||||
|
@ -520,6 +572,7 @@ private:
|
|||
uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
|
||||
ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
|
||||
bool Exhausted GUARDED_BY(MMLock) = false;
|
||||
bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
|
||||
};
|
||||
struct RegionInfo : UnpaddedRegionInfo {
|
||||
char Padding[SCUDO_CACHE_LINE_SIZE -
|
||||
|
@ -605,11 +658,11 @@ private:
|
|||
// reusable and don't need additional space for them.
|
||||
|
||||
Region->FreeListInfo.PushedBlocks += Size;
|
||||
BatchGroup *BG = Region->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
|
||||
|
||||
if (BG == nullptr) {
|
||||
// Construct `BatchGroup` on the last element.
|
||||
BG = reinterpret_cast<BatchGroup *>(
|
||||
BG = reinterpret_cast<BatchGroupT *>(
|
||||
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
|
||||
--Size;
|
||||
BG->Batches.clear();
|
||||
|
@ -620,8 +673,8 @@ private:
|
|||
// from `CreateGroup` in `pushBlocksImpl`
|
||||
BG->PushedBlocks = 1;
|
||||
BG->BytesInBGAtLastCheckpoint = 0;
|
||||
BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
|
||||
getSizeByClassId(SizeClassMap::BatchClassId));
|
||||
BG->MaxCachedPerBatch =
|
||||
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
|
||||
|
||||
Region->FreeListInfo.BlockList.push_front(BG);
|
||||
}
|
||||
|
@ -634,7 +687,7 @@ private:
|
|||
// 2. Only 1 block is pushed when the freelist is empty.
|
||||
if (BG->Batches.empty()) {
|
||||
// Construct the `TransferBatch` on the last element.
|
||||
TransferBatch *TB = reinterpret_cast<TransferBatch *>(
|
||||
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
|
||||
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
|
||||
TB->clear();
|
||||
// As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
|
||||
|
@ -649,14 +702,14 @@ private:
|
|||
BG->Batches.push_front(TB);
|
||||
}
|
||||
|
||||
TransferBatch *CurBatch = BG->Batches.front();
|
||||
TransferBatchT *CurBatch = BG->Batches.front();
|
||||
DCHECK_NE(CurBatch, nullptr);
|
||||
|
||||
for (u32 I = 0; I < Size;) {
|
||||
u16 UnusedSlots =
|
||||
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
|
||||
if (UnusedSlots == 0) {
|
||||
CurBatch = reinterpret_cast<TransferBatch *>(
|
||||
CurBatch = reinterpret_cast<TransferBatchT *>(
|
||||
decompactPtr(SizeClassMap::BatchClassId, Array[I]));
|
||||
CurBatch->clear();
|
||||
// Self-contained
|
||||
|
@ -699,24 +752,25 @@ private:
|
|||
DCHECK_GT(Size, 0U);
|
||||
|
||||
auto CreateGroup = [&](uptr CompactPtrGroupBase) {
|
||||
BatchGroup *BG = C->createGroup();
|
||||
BatchGroupT *BG =
|
||||
reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
|
||||
BG->Batches.clear();
|
||||
TransferBatch *TB = C->createBatch(ClassId, nullptr);
|
||||
TransferBatchT *TB =
|
||||
reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
|
||||
TB->clear();
|
||||
|
||||
BG->CompactPtrGroupBase = CompactPtrGroupBase;
|
||||
BG->Batches.push_front(TB);
|
||||
BG->PushedBlocks = 0;
|
||||
BG->BytesInBGAtLastCheckpoint = 0;
|
||||
BG->MaxCachedPerBatch =
|
||||
TransferBatch::getMaxCached(getSizeByClassId(ClassId));
|
||||
BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
|
||||
|
||||
return BG;
|
||||
};
|
||||
|
||||
auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
|
||||
SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
|
||||
TransferBatch *CurBatch = Batches.front();
|
||||
auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
|
||||
SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
|
||||
TransferBatchT *CurBatch = Batches.front();
|
||||
DCHECK_NE(CurBatch, nullptr);
|
||||
|
||||
for (u32 I = 0; I < Size;) {
|
||||
|
@ -724,9 +778,8 @@ private:
|
|||
u16 UnusedSlots =
|
||||
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
|
||||
if (UnusedSlots == 0) {
|
||||
CurBatch = C->createBatch(
|
||||
ClassId,
|
||||
reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
|
||||
CurBatch =
|
||||
reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
|
||||
CurBatch->clear();
|
||||
Batches.push_front(CurBatch);
|
||||
UnusedSlots = BG->MaxCachedPerBatch;
|
||||
|
@ -741,21 +794,11 @@ private:
|
|||
};
|
||||
|
||||
Region->FreeListInfo.PushedBlocks += Size;
|
||||
BatchGroup *Cur = Region->FreeListInfo.BlockList.front();
|
||||
|
||||
if (ClassId == SizeClassMap::BatchClassId) {
|
||||
if (Cur == nullptr) {
|
||||
// Don't need to classify BatchClassId.
|
||||
Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
|
||||
Region->FreeListInfo.BlockList.push_front(Cur);
|
||||
}
|
||||
InsertBlocks(Cur, Array, Size);
|
||||
return;
|
||||
}
|
||||
BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();
|
||||
|
||||
// In the following, `Cur` always points to the BatchGroup for blocks that
|
||||
// will be pushed next. `Prev` is the element right before `Cur`.
|
||||
BatchGroup *Prev = nullptr;
|
||||
BatchGroupT *Prev = nullptr;
|
||||
|
||||
while (Cur != nullptr &&
|
||||
compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
|
||||
|
@ -812,26 +855,96 @@ private:
|
|||
InsertBlocks(Cur, Array + Size - Count, Count);
|
||||
}
|
||||
|
||||
TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
|
||||
bool &ReportRegionExhausted) {
|
||||
TransferBatchT *B = nullptr;
|
||||
|
||||
while (true) {
|
||||
// We only expect one thread doing the freelist refillment and other
|
||||
// threads will be waiting for either the completion of the
|
||||
// `populateFreeListAndPopBatch()` or `pushBlocks()` called by other
|
||||
// threads.
|
||||
bool PopulateFreeList = false;
|
||||
{
|
||||
ScopedLock FL(Region->FLLock);
|
||||
if (!Region->isPopulatingFreeList) {
|
||||
Region->isPopulatingFreeList = true;
|
||||
PopulateFreeList = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (PopulateFreeList) {
|
||||
ScopedLock ML(Region->MMLock);
|
||||
|
||||
const bool RegionIsExhausted = Region->Exhausted;
|
||||
if (!RegionIsExhausted)
|
||||
B = populateFreeListAndPopBatch(C, ClassId, Region);
|
||||
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
|
||||
|
||||
{
|
||||
// Before reacquiring the `FLLock`, the freelist may be used up again
|
||||
// and some threads are waiting for the freelist refillment by the
|
||||
// current thread. It's important to set
|
||||
// `Region->isPopulatingFreeList` to false so the threads about to
|
||||
// sleep will notice the status change.
|
||||
ScopedLock FL(Region->FLLock);
|
||||
Region->isPopulatingFreeList = false;
|
||||
Region->FLLockCV.notifyAll(Region->FLLock);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
// At this point, two preconditions must be met before waiting:
// 1. The freelist is empty.
// 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
|
||||
// `populateFreeListAndPopBatch()`.
|
||||
//
|
||||
// Note that the freelist may be empty even though
// Region->isPopulatingFreeList == false, because all the newly populated
// blocks were used up right after the refill. Therefore, we have to
// check whether someone is still populating the freelist.
|
||||
ScopedLock FL(Region->FLLock);
|
||||
if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
|
||||
break;
|
||||
|
||||
if (!Region->isPopulatingFreeList)
|
||||
continue;
|
||||
|
||||
// Now the freelist is empty and someone's doing the refillment. We will
|
||||
// wait until anyone refills the freelist or someone finishes doing
|
||||
// `populateFreeListAndPopBatch()`. The refillment can be done by
|
||||
// `populateFreeListAndPopBatch()`, `pushBlocks()`,
|
||||
// `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
|
||||
Region->FLLockCV.wait(Region->FLLock);
|
||||
|
||||
if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
|
||||
break;
|
||||
}
|
||||
|
||||
return B;
|
||||
}
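The comments above describe a one-refiller/many-waiters protocol. Below is a minimal, self-contained sketch of that shape using std::mutex and std::condition_variable instead of scudo's HybridMutex and ConditionVariableT; FreeList, Refilling, and the deque of ints are illustrative stand-ins, not scudo types.

// Minimal sketch of the "one refiller, many waiters" pattern (illustrative
// names; not the scudo API).
#include <condition_variable>
#include <deque>
#include <mutex>

struct FreeList {
  std::mutex M;
  std::condition_variable CV;
  std::deque<int> Items;
  bool Refilling = false;

  int pop() {
    std::unique_lock<std::mutex> L(M);
    for (;;) {
      if (!Items.empty()) {
        int V = Items.front();
        Items.pop_front();
        return V;
      }
      if (!Refilling) {
        // Claim the refill; do the expensive work with the lock dropped.
        Refilling = true;
        L.unlock();
        std::deque<int> New = {1, 2, 3}; // stands in for the freelist refill
        L.lock();
        Items.insert(Items.end(), New.begin(), New.end());
        Refilling = false;
        CV.notify_all(); // waiters must observe Refilling == false
      } else {
        // Someone else is refilling: sleep until blocks arrive or it finishes.
        CV.wait(L);
      }
    }
  }
};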
|
||||
|
||||
// Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
|
||||
// group id will be considered first.
|
||||
//
|
||||
// The region mutex needs to be held while calling this method.
|
||||
TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
|
||||
TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
|
||||
REQUIRES(Region->FLLock) {
|
||||
if (Region->FreeListInfo.BlockList.empty())
|
||||
return nullptr;
|
||||
|
||||
SinglyLinkedList<TransferBatch> &Batches =
|
||||
SinglyLinkedList<TransferBatchT> &Batches =
|
||||
Region->FreeListInfo.BlockList.front()->Batches;
|
||||
|
||||
if (Batches.empty()) {
|
||||
DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
|
||||
BatchGroup *BG = Region->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
|
||||
Region->FreeListInfo.BlockList.pop_front();
|
||||
|
||||
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
|
||||
// `TransferBatch` with single block.
|
||||
TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
|
||||
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
|
||||
TB->clear();
|
||||
TB->add(
|
||||
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
|
||||
|
@ -839,13 +952,13 @@ private:
|
|||
return TB;
|
||||
}
|
||||
|
||||
TransferBatch *B = Batches.front();
|
||||
TransferBatchT *B = Batches.front();
|
||||
Batches.pop_front();
|
||||
DCHECK_NE(B, nullptr);
|
||||
DCHECK_GT(B->getCount(), 0U);
|
||||
|
||||
if (Batches.empty()) {
|
||||
BatchGroup *BG = Region->FreeListInfo.BlockList.front();
|
||||
BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
|
||||
Region->FreeListInfo.BlockList.pop_front();
|
||||
|
||||
// We don't keep BatchGroup with zero blocks to avoid empty-checking while
|
||||
|
@ -863,11 +976,11 @@ private:
|
|||
}
|
||||
|
||||
// Refill the freelist and return one batch.
|
||||
NOINLINE TransferBatch *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
|
||||
RegionInfo *Region)
|
||||
NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
|
||||
RegionInfo *Region)
|
||||
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
|
||||
const uptr Size = getSizeByClassId(ClassId);
|
||||
const u16 MaxCount = TransferBatch::getMaxCached(Size);
|
||||
const u16 MaxCount = CacheT::getMaxCached(Size);
|
||||
|
||||
const uptr RegionBeg = Region->RegionBeg;
|
||||
const uptr MappedUser = Region->MemMapInfo.MappedUser;
|
||||
|
@ -903,7 +1016,7 @@ private:
|
|||
DCHECK_GT(NumberOfBlocks, 0);
|
||||
|
||||
constexpr u32 ShuffleArraySize =
|
||||
MaxNumBatches * TransferBatch::MaxNumCached;
|
||||
MaxNumBatches * TransferBatchT::MaxNumCached;
|
||||
CompactPtrT ShuffleArray[ShuffleArraySize];
|
||||
DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
|
||||
|
||||
|
@ -936,7 +1049,7 @@ private:
|
|||
pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
|
||||
}
|
||||
|
||||
TransferBatch *B = popBatchImpl(C, ClassId, Region);
|
||||
TransferBatchT *B = popBatchImpl(C, ClassId, Region);
|
||||
DCHECK_NE(B, nullptr);
|
||||
|
||||
// Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
|
||||
|
@ -957,10 +1070,10 @@ private:
|
|||
if (Region->MemMapInfo.MappedUser == 0)
|
||||
return;
|
||||
const uptr BlockSize = getSizeByClassId(ClassId);
|
||||
const uptr InUse =
|
||||
const uptr InUseBlocks =
|
||||
Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
|
||||
const uptr BytesInFreeList =
|
||||
Region->MemMapInfo.AllocatedUser - InUse * BlockSize;
|
||||
Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
|
||||
uptr RegionPushedBytesDelta = 0;
|
||||
if (BytesInFreeList >=
|
||||
Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
|
||||
|
@ -972,122 +1085,145 @@ private:
|
|||
"%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
|
||||
"inuse: %6zu total: %6zu releases: %6zu last "
|
||||
"released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
|
||||
Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
|
||||
Region->Exhausted ? "E" : " ", ClassId, getSizeByClassId(ClassId),
|
||||
Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
|
||||
Region->FreeListInfo.PushedBlocks, InUse, TotalChunks,
|
||||
Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
|
||||
Region->ReleaseInfo.RangesReleased,
|
||||
Region->ReleaseInfo.LastReleasedBytes >> 10,
|
||||
RegionPushedBytesDelta >> 10, Region->RegionBeg,
|
||||
getRegionBaseByClassId(ClassId));
|
||||
}
|
||||
|
||||
void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId,
|
||||
ScopedString *Str) REQUIRES(Region->MMLock) {
|
||||
const uptr BlockSize = getSizeByClassId(ClassId);
|
||||
const uptr AllocatedUserEnd =
|
||||
Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
|
||||
|
||||
SinglyLinkedList<BatchGroupT> GroupsToRelease;
|
||||
{
|
||||
ScopedLock L(Region->FLLock);
|
||||
GroupsToRelease = Region->FreeListInfo.BlockList;
|
||||
Region->FreeListInfo.BlockList.clear();
|
||||
}
|
||||
|
||||
FragmentationRecorder Recorder;
|
||||
if (!GroupsToRelease.empty()) {
|
||||
PageReleaseContext Context =
|
||||
markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
|
||||
getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
|
||||
auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
|
||||
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
|
||||
|
||||
mergeGroupsToReleaseBack(Region, GroupsToRelease);
|
||||
}
|
||||
|
||||
ScopedLock L(Region->FLLock);
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
|
||||
const uptr InUseBlocks =
|
||||
Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
|
||||
const uptr AllocatedPagesCount =
|
||||
roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
|
||||
DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
|
||||
const uptr InUsePages =
|
||||
AllocatedPagesCount - Recorder.getReleasedPagesCount();
|
||||
const uptr InUseBytes = InUsePages * PageSize;
|
||||
|
||||
uptr Integral;
|
||||
uptr Fractional;
|
||||
computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
|
||||
&Fractional);
|
||||
Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
|
||||
"pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
|
||||
ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
|
||||
AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
|
||||
}
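For reference, the util figure printed here is just live payload bytes over the bytes of pages still resident; a stand-alone restatement of that arithmetic (computePct is an illustrative stand-in for scudo's computePercentage):

#include <cstddef>

// Stand-in for computePercentage(): integral and two-digit fractional parts
// of Num / Den, matching the "util: %zu.%02zu%%" output above.
static void computePct(size_t Num, size_t Den, size_t *Int, size_t *Frac) {
  if (Den == 0) { *Int = *Frac = 0; return; }
  const size_t Scaled = Num * 10000 / Den; // basis points
  *Int = Scaled / 100;
  *Frac = Scaled % 100;
}

// Example: 6000 live blocks of 32 bytes spread over 50 in-use 4 KiB pages:
// computePct(6000 * 32, 50 * 4096, ...) yields 93.75% utilization.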
|
||||
|
||||
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
|
||||
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
|
||||
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
|
||||
ScopedLock L(Region->FLLock);
|
||||
|
||||
const uptr BlockSize = getSizeByClassId(ClassId);
|
||||
const uptr BytesInFreeList =
|
||||
Region->MemMapInfo.AllocatedUser - (Region->FreeListInfo.PoppedBlocks -
|
||||
Region->FreeListInfo.PushedBlocks) *
|
||||
BlockSize;
|
||||
if (UNLIKELY(BytesInFreeList == 0))
|
||||
return false;
|
||||
|
||||
uptr BytesInFreeList;
|
||||
const uptr AllocatedUserEnd =
|
||||
Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
|
||||
const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
|
||||
SinglyLinkedList<BatchGroupT> GroupsToRelease;
|
||||
|
||||
// ====================================================================== //
|
||||
// 1. Check if we have enough free blocks and if it's worth doing a page
|
||||
// release.
|
||||
// ====================================================================== //
|
||||
if (ReleaseType != ReleaseToOS::ForceAll &&
|
||||
!hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
|
||||
ReleaseType)) {
|
||||
return 0;
|
||||
}
|
||||
{
|
||||
ScopedLock L(Region->FLLock);
|
||||
|
||||
// ====================================================================== //
|
||||
// 2. Determine which groups can release the pages. Use a heuristic to
|
||||
// gather groups that are candidates for doing a release.
|
||||
// ====================================================================== //
|
||||
SinglyLinkedList<BatchGroup> GroupsToRelease;
|
||||
if (ReleaseType == ReleaseToOS::ForceAll) {
|
||||
GroupsToRelease = Region->FreeListInfo.BlockList;
|
||||
Region->FreeListInfo.BlockList.clear();
|
||||
} else {
|
||||
GroupsToRelease = collectGroupsToRelease(
|
||||
Region, BlockSize, AllocatedUserEnd, CompactPtrBase);
|
||||
}
|
||||
if (GroupsToRelease.empty())
|
||||
return 0;
|
||||
BytesInFreeList = Region->MemMapInfo.AllocatedUser -
|
||||
(Region->FreeListInfo.PoppedBlocks -
|
||||
Region->FreeListInfo.PushedBlocks) *
|
||||
BlockSize;
|
||||
if (UNLIKELY(BytesInFreeList == 0))
|
||||
return false;
|
||||
|
||||
// Ideally, we should use a class like `ScopedUnlock`. However, this form of
|
||||
// unlocking is not supported by the thread-safety analysis. See
|
||||
// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-alias-analysis
|
||||
// for more details.
|
||||
// Put it as local class so that we can mark the ctor/dtor with proper
|
||||
// annotations associated to the target lock. Note that accessing the
|
||||
// function variable in local class only works in thread-safety annotations.
|
||||
// TODO: Implement general `ScopedUnlock` when it's supported.
|
||||
class FLLockScopedUnlock {
|
||||
public:
|
||||
FLLockScopedUnlock(RegionInfo *Region) RELEASE(Region->FLLock)
|
||||
: R(Region) {
|
||||
R->FLLock.assertHeld();
|
||||
R->FLLock.unlock();
|
||||
// ==================================================================== //
|
||||
// 1. Check if we have enough free blocks and if it's worth doing a page
|
||||
// release.
|
||||
// ==================================================================== //
|
||||
if (ReleaseType != ReleaseToOS::ForceAll &&
|
||||
!hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
|
||||
ReleaseType)) {
|
||||
return 0;
|
||||
}
|
||||
~FLLockScopedUnlock() ACQUIRE(Region->FLLock) { R->FLLock.lock(); }
|
||||
|
||||
private:
|
||||
RegionInfo *R;
|
||||
};
|
||||
// ==================================================================== //
|
||||
// 2. Determine which groups can release the pages. Use a heuristic to
|
||||
// gather groups that are candidates for doing a release.
|
||||
// ==================================================================== //
|
||||
if (ReleaseType == ReleaseToOS::ForceAll) {
|
||||
GroupsToRelease = Region->FreeListInfo.BlockList;
|
||||
Region->FreeListInfo.BlockList.clear();
|
||||
} else {
|
||||
GroupsToRelease =
|
||||
collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
|
||||
getCompactPtrBaseByClassId(ClassId));
|
||||
}
|
||||
if (GroupsToRelease.empty())
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Note that we have extracted the `GroupsToRelease` from region freelist.
|
||||
// It's safe to let pushBlocks()/popBatches() access the remaining region
|
||||
// freelist. In the steps 3 and 4, we will temporarily release the FLLock
|
||||
// and lock it again before step 5.
|
||||
|
||||
uptr ReleasedBytes = 0;
|
||||
{
|
||||
FLLockScopedUnlock UL(Region);
|
||||
// ==================================================================== //
|
||||
// 3. Mark the free blocks in `GroupsToRelease` in the
|
||||
// `PageReleaseContext`. Then we can tell which pages are in-use by
|
||||
// querying `PageReleaseContext`.
|
||||
// ==================================================================== //
|
||||
PageReleaseContext Context = markFreeBlocks(
|
||||
Region, BlockSize, AllocatedUserEnd, CompactPtrBase, GroupsToRelease);
|
||||
if (UNLIKELY(!Context.hasBlockMarked())) {
|
||||
ScopedLock L(Region->FLLock);
|
||||
mergeGroupsToReleaseBack(Region, GroupsToRelease);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ==================================================================== //
|
||||
// 4. Release the unused physical pages back to the OS.
|
||||
// ==================================================================== //
|
||||
RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
|
||||
Region->RegionBeg,
|
||||
Context.getReleaseOffset());
|
||||
auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
|
||||
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
|
||||
if (Recorder.getReleasedRangesCount() > 0) {
|
||||
Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
|
||||
Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
|
||||
Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
|
||||
}
|
||||
Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
|
||||
ReleasedBytes = Recorder.getReleasedBytes();
|
||||
// ==================================================================== //
|
||||
// 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
|
||||
// Then we can tell which pages are in-use by querying
|
||||
// `PageReleaseContext`.
|
||||
// ==================================================================== //
|
||||
PageReleaseContext Context =
|
||||
markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
|
||||
getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
|
||||
if (UNLIKELY(!Context.hasBlockMarked())) {
|
||||
mergeGroupsToReleaseBack(Region, GroupsToRelease);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ==================================================================== //
|
||||
// 4. Release the unused physical pages back to the OS.
|
||||
// ==================================================================== //
|
||||
RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
|
||||
Region->RegionBeg,
|
||||
Context.getReleaseOffset());
|
||||
auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
|
||||
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
|
||||
if (Recorder.getReleasedRangesCount() > 0) {
|
||||
Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
|
||||
Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
|
||||
Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
|
||||
}
|
||||
Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
|
||||
|
||||
// ====================================================================== //
|
||||
// 5. Merge the `GroupsToRelease` back to the freelist.
|
||||
// ====================================================================== //
|
||||
mergeGroupsToReleaseBack(Region, GroupsToRelease);
|
||||
|
||||
return ReleasedBytes;
|
||||
return Recorder.getReleasedBytes();
|
||||
}
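The BytesInFreeList quantity that gates this path (computed both before and after collecting groups above) is simply the bytes handed to the region minus the bytes still held by users; a self-contained restatement, with size_t standing in for scudo's uptr:

#include <cstddef>

// Bytes currently sitting in the region freelist: everything mapped for user
// blocks minus the blocks that are popped but not yet pushed back.
static inline size_t bytesInFreeList(size_t AllocatedUser, size_t PoppedBlocks,
                                     size_t PushedBlocks, size_t BlockSize) {
  const size_t InUseBlocks = PoppedBlocks - PushedBlocks;
  return AllocatedUser - InUseBlocks * BlockSize;
}
// e.g. 1 MiB allocated, 4096 popped, 3840 pushed, 64-byte blocks:
// 1048576 - 256 * 64 = 1032192 bytes are candidates for page release.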
|
||||
|
||||
bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
|
||||
|
@ -1154,13 +1290,13 @@ private:
|
|||
return true;
|
||||
}
|
||||
|
||||
SinglyLinkedList<BatchGroup>
|
||||
SinglyLinkedList<BatchGroupT>
|
||||
collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
|
||||
const uptr AllocatedUserEnd, const uptr CompactPtrBase)
|
||||
REQUIRES(Region->MMLock, Region->FLLock) {
|
||||
const uptr GroupSize = (1U << GroupSizeLog);
|
||||
const uptr GroupSize = (1UL << GroupSizeLog);
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
SinglyLinkedList<BatchGroup> GroupsToRelease;
|
||||
SinglyLinkedList<BatchGroupT> GroupsToRelease;
|
||||
|
||||
// We are examining each group and will take the minimum distance to the
|
||||
// release threshold as the next Region::TryReleaseThreshold(). Note that if
|
||||
|
@ -1169,8 +1305,8 @@ private:
|
|||
// the comment on `SmallerBlockReleasePageDelta` for more details.
|
||||
uptr MinDistToThreshold = GroupSize;
|
||||
|
||||
for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
|
||||
*Prev = nullptr;
|
||||
for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
|
||||
*Prev = nullptr;
|
||||
BG != nullptr;) {
|
||||
// Group boundary is always GroupSize-aligned from CompactPtr base. The
|
||||
// layout of memory groups is like,
|
||||
|
@ -1254,7 +1390,7 @@ private:
|
|||
}
|
||||
}
|
||||
|
||||
// If `BG` is the first BatchGroup in the list, we only need to advance
|
||||
// If `BG` is the first BatchGroupT in the list, we only need to advance
|
||||
// `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
|
||||
// for `Prev`.
|
||||
//
|
||||
|
@ -1290,7 +1426,7 @@ private:
|
|||
// Note that we need to advance before pushing this BatchGroup to
|
||||
// GroupsToRelease because it's a destructive operation.
|
||||
|
||||
BatchGroup *Cur = BG;
|
||||
BatchGroupT *Cur = BG;
|
||||
BG = BG->Next;
|
||||
|
||||
// Ideally, we may want to update this only after successful release.
|
||||
|
@ -1323,9 +1459,9 @@ private:
|
|||
PageReleaseContext
|
||||
markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
|
||||
const uptr AllocatedUserEnd, const uptr CompactPtrBase,
|
||||
SinglyLinkedList<BatchGroup> &GroupsToRelease)
|
||||
SinglyLinkedList<BatchGroupT> &GroupsToRelease)
|
||||
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
|
||||
const uptr GroupSize = (1U << GroupSizeLog);
|
||||
const uptr GroupSize = (1UL << GroupSizeLog);
|
||||
auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
|
||||
return decompactPtrInternal(CompactPtrBase, CompactPtr);
|
||||
};
|
||||
|
@ -1352,7 +1488,7 @@ private:
|
|||
if (UNLIKELY(!Context.ensurePageMapAllocated()))
|
||||
return Context;
|
||||
|
||||
for (BatchGroup &BG : GroupsToRelease) {
|
||||
for (BatchGroupT &BG : GroupsToRelease) {
|
||||
const uptr BatchGroupBase =
|
||||
decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
|
||||
const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
|
||||
|
@ -1400,8 +1536,10 @@ private:
|
|||
}
|
||||
|
||||
void mergeGroupsToReleaseBack(RegionInfo *Region,
|
||||
SinglyLinkedList<BatchGroup> &GroupsToRelease)
|
||||
REQUIRES(Region->MMLock, Region->FLLock) {
|
||||
SinglyLinkedList<BatchGroupT> &GroupsToRelease)
|
||||
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
|
||||
ScopedLock L(Region->FLLock);
|
||||
|
||||
// After merging two freelists, we may have redundant `BatchGroup`s that
|
||||
// need to be recycled. The number of unused `BatchGroup`s is expected to be
|
||||
// small. Pick a constant which is inferred from real programs.
|
||||
|
@ -1419,8 +1557,8 @@ private:
|
|||
// Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
|
||||
// that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
|
||||
// sorted.
|
||||
for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
|
||||
*Prev = nullptr;
|
||||
for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
|
||||
*Prev = nullptr;
|
||||
;) {
|
||||
if (BG == nullptr || GroupsToRelease.empty()) {
|
||||
if (!GroupsToRelease.empty())
|
||||
|
@ -1437,8 +1575,8 @@ private:
|
|||
continue;
|
||||
}
|
||||
|
||||
BatchGroup *Cur = GroupsToRelease.front();
|
||||
TransferBatch *UnusedTransferBatch = nullptr;
|
||||
BatchGroupT *Cur = GroupsToRelease.front();
|
||||
TransferBatchT *UnusedTransferBatch = nullptr;
|
||||
GroupsToRelease.pop_front();
|
||||
|
||||
if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
|
||||
|
@ -1454,7 +1592,7 @@ private:
|
|||
if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
|
||||
BG->Batches.append_back(&Cur->Batches);
|
||||
} else {
|
||||
TransferBatch *NonFullBatch = Cur->Batches.front();
|
||||
TransferBatchT *NonFullBatch = Cur->Batches.front();
|
||||
Cur->Batches.pop_front();
|
||||
const u16 NonFullBatchCount = NonFullBatch->getCount();
|
||||
// The remaining Batches in `Cur` are full.
|
||||
|
@ -1481,6 +1619,8 @@ private:
|
|||
if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
|
||||
ScopedLock L(BatchClassRegion->FLLock);
|
||||
pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
|
||||
if (conditionVariableEnabled())
|
||||
BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
|
||||
Idx = 0;
|
||||
}
|
||||
Blocks[Idx++] =
|
||||
|
@ -1516,15 +1656,20 @@ private:
|
|||
if (Idx != 0) {
|
||||
ScopedLock L(BatchClassRegion->FLLock);
|
||||
pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
|
||||
if (conditionVariableEnabled())
|
||||
BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
|
||||
}
|
||||
|
||||
if (SCUDO_DEBUG) {
|
||||
BatchGroup *Prev = Region->FreeListInfo.BlockList.front();
|
||||
for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
|
||||
BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
|
||||
for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
|
||||
Prev = Cur, Cur = Cur->Next) {
|
||||
CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
|
||||
}
|
||||
}
|
||||
|
||||
if (conditionVariableEnabled())
|
||||
Region->FLLockCV.notifyAll(Region->FLLock);
|
||||
}
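mergeGroupsToReleaseBack() relies on both lists being sorted by CompactPtrGroupBase. A generic sketch of that kind of in-place sorted splice follows; Node and mergeSorted are illustrative, and the real code additionally merges groups with equal keys and recycles the redundant BatchGroup/TransferBatch blocks.

#include <cstddef>

// Generic sorted-splice sketch (illustrative Node type). Equal keys are kept
// adjacent here; the real code merges their batches instead.
struct Node {
  size_t Key; // plays the role of CompactPtrGroupBase
  Node *Next;
};

// Splice the sorted list `From` into the sorted list `Into`; result is sorted.
static Node *mergeSorted(Node *Into, Node *From) {
  Node Head{0, Into};
  Node *Prev = &Head;
  while (From) {
    while (Prev->Next && Prev->Next->Key < From->Key)
      Prev = Prev->Next;
    Node *Next = From->Next;
    From->Next = Prev->Next;
    Prev->Next = From;
    Prev = From;
    From = Next;
  }
  return Head.Next;
}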
|
||||
|
||||
// TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
|
||||
|
|
3
Telegram/ThirdParty/scudo/release.cpp
vendored
|
@@ -10,7 +10,8 @@

 namespace scudo {

-BufferPool<RegionPageMap::StaticBufferCount, RegionPageMap::StaticBufferSize>
+BufferPool<RegionPageMap::StaticBufferCount,
+           RegionPageMap::StaticBufferNumElements>
     RegionPageMap::Buffers;

 } // namespace scudo
|
||||
|
|
171
Telegram/ThirdParty/scudo/release.h
vendored
|
@ -80,20 +80,53 @@ private:
|
|||
MapPlatformData *Data = nullptr;
|
||||
};
|
||||
|
||||
// A buffer pool which holds a fixed number of static buffers for fast buffer
|
||||
// allocation. If the request size is greater than `StaticBufferSize`, it'll
|
||||
class FragmentationRecorder {
|
||||
public:
|
||||
FragmentationRecorder() = default;
|
||||
|
||||
uptr getReleasedPagesCount() const { return ReleasedPagesCount; }
|
||||
|
||||
void releasePageRangeToOS(uptr From, uptr To) {
|
||||
DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
|
||||
ReleasedPagesCount += (To - From) / getPageSizeCached();
|
||||
}
|
||||
|
||||
private:
|
||||
uptr ReleasedPagesCount = 0;
|
||||
};
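The recorder above never unmaps anything; it only counts how many whole pages the release logic would have handed back. A stand-alone restatement with a fixed 4 KiB page size (PageCountRecorder is an illustrative name, not scudo code):

#include <cassert>
#include <cstddef>

// Counts releasable pages without actually releasing them, mirroring
// FragmentationRecorder::releasePageRangeToOS() above.
struct PageCountRecorder {
  static constexpr size_t PageSize = 4096;
  size_t ReleasedPages = 0;
  void releasePageRangeToOS(size_t From, size_t To) {
    assert((To - From) % PageSize == 0);
    ReleasedPages += (To - From) / PageSize;
  }
};

int main() {
  PageCountRecorder R;
  R.releasePageRangeToOS(0, 3 * PageCountRecorder::PageSize);
  R.releasePageRangeToOS(8 * PageCountRecorder::PageSize,
                         10 * PageCountRecorder::PageSize);
  assert(R.ReleasedPages == 5);
}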
|
||||
|
||||
// A buffer pool which holds a fixed number of static buffers of `uptr` elements
|
||||
// for fast buffer allocation. If the request size is greater than
|
||||
// `StaticBufferNumElements` or if all the static buffers are in use, it'll
|
||||
// delegate the allocation to map().
|
||||
template <uptr StaticBufferCount, uptr StaticBufferSize> class BufferPool {
|
||||
template <uptr StaticBufferCount, uptr StaticBufferNumElements>
|
||||
class BufferPool {
|
||||
public:
|
||||
// Preserve 1 bit in the `Mask` so that we don't need to do zero-check while
|
||||
// extracting the least significant bit from the `Mask`.
|
||||
static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
|
||||
static_assert(isAligned(StaticBufferSize, SCUDO_CACHE_LINE_SIZE), "");
|
||||
static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
|
||||
SCUDO_CACHE_LINE_SIZE),
|
||||
"");
|
||||
|
||||
// Return a buffer which is at least `BufferSize`.
|
||||
uptr *getBuffer(const uptr BufferSize) {
|
||||
if (UNLIKELY(BufferSize > StaticBufferSize))
|
||||
return getDynamicBuffer(BufferSize);
|
||||
struct Buffer {
|
||||
// Pointer to the buffer's memory, or nullptr if no buffer was allocated.
|
||||
uptr *Data = nullptr;
|
||||
|
||||
// The index of the underlying static buffer, or StaticBufferCount if this
|
||||
// buffer was dynamically allocated. This value is initially set to a poison
|
||||
// value to aid debugging.
|
||||
uptr BufferIndex = ~static_cast<uptr>(0);
|
||||
|
||||
// Only valid if BufferIndex == StaticBufferCount.
|
||||
MemMapT MemMap = {};
|
||||
};
|
||||
|
||||
// Return a zero-initialized buffer which can contain at least the given
|
||||
// number of elements, or nullptr on failure.
|
||||
Buffer getBuffer(const uptr NumElements) {
|
||||
if (UNLIKELY(NumElements > StaticBufferNumElements))
|
||||
return getDynamicBuffer(NumElements);
|
||||
|
||||
uptr index;
|
||||
{
|
||||
|
@ -108,69 +141,55 @@ public:
|
|||
}
|
||||
|
||||
if (index >= StaticBufferCount)
|
||||
return getDynamicBuffer(BufferSize);
|
||||
return getDynamicBuffer(NumElements);
|
||||
|
||||
const uptr Offset = index * StaticBufferSize;
|
||||
memset(&RawBuffer[Offset], 0, StaticBufferSize);
|
||||
return &RawBuffer[Offset];
|
||||
Buffer Buf;
|
||||
Buf.Data = &RawBuffer[index * StaticBufferNumElements];
|
||||
Buf.BufferIndex = index;
|
||||
memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
|
||||
return Buf;
|
||||
}
|
||||
|
||||
void releaseBuffer(uptr *Buffer, const uptr BufferSize) {
|
||||
const uptr index = getStaticBufferIndex(Buffer, BufferSize);
|
||||
if (index < StaticBufferCount) {
|
||||
void releaseBuffer(Buffer Buf) {
|
||||
DCHECK_NE(Buf.Data, nullptr);
|
||||
DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
|
||||
if (Buf.BufferIndex != StaticBufferCount) {
|
||||
ScopedLock L(Mutex);
|
||||
DCHECK_EQ((Mask & (static_cast<uptr>(1) << index)), 0U);
|
||||
Mask |= static_cast<uptr>(1) << index;
|
||||
DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
|
||||
Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
|
||||
} else {
|
||||
unmap(reinterpret_cast<void *>(Buffer),
|
||||
roundUp(BufferSize, getPageSizeCached()));
|
||||
Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
|
||||
}
|
||||
}
|
||||
|
||||
bool isStaticBufferTestOnly(uptr *Buffer, uptr BufferSize) {
|
||||
return getStaticBufferIndex(Buffer, BufferSize) < StaticBufferCount;
|
||||
bool isStaticBufferTestOnly(const Buffer &Buf) {
|
||||
DCHECK_NE(Buf.Data, nullptr);
|
||||
DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
|
||||
return Buf.BufferIndex != StaticBufferCount;
|
||||
}
|
||||
|
||||
private:
|
||||
uptr getStaticBufferIndex(uptr *Buffer, uptr BufferSize) {
|
||||
if (UNLIKELY(BufferSize > StaticBufferSize))
|
||||
return StaticBufferCount;
|
||||
|
||||
const uptr BufferBase = reinterpret_cast<uptr>(Buffer);
|
||||
const uptr RawBufferBase = reinterpret_cast<uptr>(RawBuffer);
|
||||
|
||||
if (BufferBase < RawBufferBase ||
|
||||
BufferBase >= RawBufferBase + sizeof(RawBuffer)) {
|
||||
return StaticBufferCount;
|
||||
}
|
||||
|
||||
DCHECK_LE(BufferSize, StaticBufferSize);
|
||||
DCHECK_LE(BufferBase + BufferSize, RawBufferBase + sizeof(RawBuffer));
|
||||
DCHECK_EQ((BufferBase - RawBufferBase) % StaticBufferSize, 0U);
|
||||
|
||||
const uptr index =
|
||||
(BufferBase - RawBufferBase) / (StaticBufferSize * sizeof(uptr));
|
||||
DCHECK_LT(index, StaticBufferCount);
|
||||
return index;
|
||||
}
|
||||
|
||||
uptr *getDynamicBuffer(const uptr BufferSize) {
|
||||
Buffer getDynamicBuffer(const uptr NumElements) {
|
||||
// When using a heap-based buffer, precommit the pages backing the
|
||||
// Vmar by passing |MAP_PRECOMMIT| flag. This allows an optimization
|
||||
// where page fault exceptions are skipped as the allocated memory
|
||||
// is accessed. So far, this is only enabled on Fuchsia. It hasn't proven a
|
||||
// performance benefit on other platforms.
|
||||
const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
|
||||
return reinterpret_cast<uptr *>(
|
||||
map(nullptr, roundUp(BufferSize, getPageSizeCached()), "scudo:counters",
|
||||
MmapFlags, &MapData));
|
||||
const uptr MappedSize =
|
||||
roundUp(NumElements * sizeof(uptr), getPageSizeCached());
|
||||
Buffer Buf;
|
||||
if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
|
||||
Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
|
||||
Buf.BufferIndex = StaticBufferCount;
|
||||
}
|
||||
return Buf;
|
||||
}
|
||||
|
||||
HybridMutex Mutex;
|
||||
// '1' means that buffer index is not used. '0' means the buffer is in use.
|
||||
uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
|
||||
uptr RawBuffer[StaticBufferCount * StaticBufferSize] GUARDED_BY(Mutex);
|
||||
[[no_unique_address]] MapPlatformData MapData = {};
|
||||
uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
|
||||
};
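The static-buffer bookkeeping above is a single word-sized bitmask: bit i is 1 while buffer i is free, and keeping StaticBufferCount below the word size leaves the upper bits permanently set, so the least-significant-bit scan never has to special-case an empty mask. A condensed sketch of that slot logic (grabFreeSlot/releaseSlot are illustrative names; the real code does this under Mutex):

#include <cstdint>

// Bit i of Mask is 1 while slot i is free; start with Mask = ~0ULL.
static constexpr unsigned StaticBufferCount = 2;

static int grabFreeSlot(uint64_t &Mask) {
  // __builtin_ctzll (GCC/Clang) finds the lowest set bit; the sentinel bits
  // above StaticBufferCount guarantee Mask is never zero here.
  const unsigned Index = static_cast<unsigned>(__builtin_ctzll(Mask));
  if (Index >= StaticBufferCount)
    return -1;                      // all static buffers busy -> go dynamic
  Mask &= ~(uint64_t{1} << Index);  // mark slot as in use
  return static_cast<int>(Index);
}

static void releaseSlot(uint64_t &Mask, unsigned Index) {
  Mask |= uint64_t{1} << Index;     // mark slot as free again
}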
|
||||
|
||||
// A Region page map is used to record the usage of pages in the regions. It
|
||||
|
@ -185,23 +204,17 @@ private:
|
|||
class RegionPageMap {
|
||||
public:
|
||||
RegionPageMap()
|
||||
: Regions(0),
|
||||
NumCounters(0),
|
||||
CounterSizeBitsLog(0),
|
||||
CounterMask(0),
|
||||
PackingRatioLog(0),
|
||||
BitOffsetMask(0),
|
||||
SizePerRegion(0),
|
||||
BufferSize(0),
|
||||
Buffer(nullptr) {}
|
||||
: Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
|
||||
PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
|
||||
BufferNumElements(0) {}
|
||||
RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
|
||||
reset(NumberOfRegions, CountersPerRegion, MaxValue);
|
||||
}
|
||||
~RegionPageMap() {
|
||||
if (!isAllocated())
|
||||
return;
|
||||
Buffers.releaseBuffer(Buffer, BufferSize);
|
||||
Buffer = nullptr;
|
||||
Buffers.releaseBuffer(Buffer);
|
||||
Buffer = {};
|
||||
}
|
||||
|
||||
// Lock of `StaticBuffer` is acquired conditionally and there's no easy way to
|
||||
|
@ -216,7 +229,7 @@ public:
|
|||
Regions = NumberOfRegion;
|
||||
NumCounters = CountersPerRegion;
|
||||
|
||||
constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
|
||||
constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
|
||||
// Rounding counter storage size up to the power of two allows for using
|
||||
// bit shifts calculating particular counter's Index and offset.
|
||||
const uptr CounterSizeBits =
|
||||
|
@ -233,11 +246,11 @@ public:
|
|||
SizePerRegion =
|
||||
roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
|
||||
PackingRatioLog;
|
||||
BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
|
||||
Buffer = Buffers.getBuffer(BufferSize);
|
||||
BufferNumElements = SizePerRegion * Regions;
|
||||
Buffer = Buffers.getBuffer(BufferNumElements);
|
||||
}
|
||||
|
||||
bool isAllocated() const { return !!Buffer; }
|
||||
bool isAllocated() const { return Buffer.Data != nullptr; }
|
||||
|
||||
uptr getCount() const { return NumCounters; }
|
||||
|
||||
|
@ -246,7 +259,8 @@ public:
|
|||
DCHECK_LT(I, NumCounters);
|
||||
const uptr Index = I >> PackingRatioLog;
|
||||
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
|
||||
return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
|
||||
return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
|
||||
CounterMask;
|
||||
}
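The shifts above are just a base-2 divide and modulo over packed counters. A worked example assuming 4-bit counters in 64-bit words (so PackingRatioLog = 4, CounterSizeBitsLog = 2, BitOffsetMask = CounterMask = 0xF):

#include <cstddef>
#include <cstdint>

// Read packed counter I: which word it lives in, then which 4-bit lane.
static uint64_t getCounter(const uint64_t *Words, size_t I) {
  const size_t Index = I >> 4;               // I / 16 counters per word
  const uint64_t BitOffset = (I & 0xF) << 2; // (I % 16) * 4 bits
  return (Words[Index] >> BitOffset) & 0xF;
}
// Counter 21 -> word 1, bits [20, 24). Rounding the counter size up to a
// power of two is what lets shifts replace the divide and modulo.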
|
||||
|
||||
void inc(uptr Region, uptr I) const {
|
||||
|
@ -255,8 +269,8 @@ public:
|
|||
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
|
||||
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
|
||||
DCHECK_EQ(isAllCounted(Region, I), false);
|
||||
Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
|
||||
<< BitOffset;
|
||||
Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
|
||||
<< BitOffset;
|
||||
}
|
||||
|
||||
void incN(uptr Region, uptr I, uptr N) const {
|
||||
|
@ -267,7 +281,7 @@ public:
|
|||
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
|
||||
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
|
||||
DCHECK_EQ(isAllCounted(Region, I), false);
|
||||
Buffer[Region * SizePerRegion + Index] += N << BitOffset;
|
||||
Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
|
||||
}
|
||||
|
||||
void incRange(uptr Region, uptr From, uptr To) const {
|
||||
|
@ -286,7 +300,7 @@ public:
|
|||
const uptr Index = I >> PackingRatioLog;
|
||||
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
|
||||
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
|
||||
Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
|
||||
Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
|
||||
}
|
||||
void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
|
||||
DCHECK_LE(From, To);
|
||||
|
@ -309,9 +323,16 @@ public:
|
|||
return get(Region, I) == CounterMask;
|
||||
}
|
||||
|
||||
uptr getBufferSize() const { return BufferSize; }
|
||||
uptr getBufferNumElements() const { return BufferNumElements; }
|
||||
|
||||
private:
|
||||
// We may consider making this configurable if there are cases which may
|
||||
// benefit from this.
|
||||
static const uptr StaticBufferCount = 2U;
|
||||
static const uptr StaticBufferNumElements = 512U;
|
||||
using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
|
||||
static BufferPoolT Buffers;
|
||||
|
||||
uptr Regions;
|
||||
uptr NumCounters;
|
||||
uptr CounterSizeBitsLog;
|
||||
|
@ -320,14 +341,8 @@ private:
|
|||
uptr BitOffsetMask;
|
||||
|
||||
uptr SizePerRegion;
|
||||
uptr BufferSize;
|
||||
uptr *Buffer;
|
||||
|
||||
// We may consider making this configurable if there are cases which may
|
||||
// benefit from this.
|
||||
static const uptr StaticBufferCount = 2U;
|
||||
static const uptr StaticBufferSize = 512U;
|
||||
static BufferPool<StaticBufferCount, StaticBufferSize> Buffers;
|
||||
uptr BufferNumElements;
|
||||
BufferPoolT::Buffer Buffer;
|
||||
};
|
||||
|
||||
template <class ReleaseRecorderT> class FreePagesRangeTracker {
|
||||
|
|
33
Telegram/ThirdParty/scudo/report.cpp
vendored
|
@ -24,11 +24,7 @@ public:
|
|||
Message.vappend(Format, Args);
|
||||
va_end(Args);
|
||||
}
|
||||
NORETURN ~ScopedErrorReport() {
|
||||
outputRaw(Message.data());
|
||||
setAbortMessage(Message.data());
|
||||
die();
|
||||
}
|
||||
NORETURN ~ScopedErrorReport() { reportRawError(Message.data()); }
|
||||
|
||||
private:
|
||||
ScopedString Message;
|
||||
|
@ -36,18 +32,6 @@ private:
|
|||
|
||||
inline void NORETURN trap() { __builtin_trap(); }
|
||||
|
||||
void NORETURN reportSoftRSSLimit(uptr RssLimitMb) {
|
||||
ScopedErrorReport Report;
|
||||
Report.append("Soft RSS limit of %zu MB exhausted, current RSS is %zu MB\n",
|
||||
RssLimitMb, GetRSS() >> 20);
|
||||
}
|
||||
|
||||
void NORETURN reportHardRSSLimit(uptr RssLimitMb) {
|
||||
ScopedErrorReport Report;
|
||||
Report.append("Hard RSS limit of %zu MB exhausted, current RSS is %zu MB\n",
|
||||
RssLimitMb, GetRSS() >> 20);
|
||||
}
|
||||
|
||||
// This could potentially be called recursively if a CHECK fails in the reports.
|
||||
void NORETURN reportCheckFailed(const char *File, int Line,
|
||||
const char *Condition, u64 Value1, u64 Value2) {
|
||||
|
@ -67,6 +51,13 @@ void NORETURN reportError(const char *Message) {
|
|||
Report.append("%s\n", Message);
|
||||
}
|
||||
|
||||
// Generic fatal error message without ScopedString.
|
||||
void NORETURN reportRawError(const char *Message) {
|
||||
outputRaw(Message);
|
||||
setAbortMessage(Message);
|
||||
die();
|
||||
}
|
||||
|
||||
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
|
||||
ScopedErrorReport Report;
|
||||
Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
|
||||
|
@ -79,14 +70,6 @@ void NORETURN reportHeaderCorruption(void *Ptr) {
|
|||
Report.append("corrupted chunk header at address %p\n", Ptr);
|
||||
}
|
||||
|
||||
// Two threads have attempted to modify a chunk header at the same time. This is
|
||||
// symptomatic of a race-condition in the application code, or general lack of
|
||||
// proper locking.
|
||||
void NORETURN reportHeaderRace(void *Ptr) {
|
||||
ScopedErrorReport Report;
|
||||
Report.append("race on chunk header at address %p\n", Ptr);
|
||||
}
|
||||
|
||||
// The allocator was compiled with parameters that conflict with field size
|
||||
// requirements.
|
||||
void NORETURN reportSanityCheckError(const char *Field) {
|
||||
|
|
8
Telegram/ThirdParty/scudo/report.h
vendored
|
@ -15,15 +15,17 @@ namespace scudo {
|
|||
|
||||
// Reports are *fatal* unless stated otherwise.
|
||||
|
||||
// Generic error.
|
||||
// Generic error, adds newline to end of message.
|
||||
void NORETURN reportError(const char *Message);
|
||||
|
||||
// Generic error, but the message is not modified.
|
||||
void NORETURN reportRawError(const char *Message);
|
||||
|
||||
// Flags related errors.
|
||||
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
|
||||
|
||||
// Chunk header related errors.
|
||||
void NORETURN reportHeaderCorruption(void *Ptr);
|
||||
void NORETURN reportHeaderRace(void *Ptr);
|
||||
|
||||
// Sanity checks related error.
|
||||
void NORETURN reportSanityCheckError(const char *Field);
|
||||
|
@ -34,8 +36,6 @@ void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
|
|||
uptr MaxSize);
|
||||
void NORETURN reportOutOfBatchClass();
|
||||
void NORETURN reportOutOfMemory(uptr RequestedSize);
|
||||
void NORETURN reportSoftRSSLimit(uptr RssLimitMb);
|
||||
void NORETURN reportHardRSSLimit(uptr RssLimitMb);
|
||||
enum class AllocatorAction : u8 {
|
||||
Recycling,
|
||||
Deallocating,
|
||||
|
|
58
Telegram/ThirdParty/scudo/report_linux.cpp
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
//===-- report_linux.cpp ----------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#if SCUDO_LINUX || SCUDO_TRUSTY
|
||||
|
||||
#include "common.h"
|
||||
#include "internal_defs.h"
|
||||
#include "report.h"
|
||||
#include "report_linux.h"
|
||||
#include "string_utils.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
namespace scudo {
|
||||
|
||||
// Fatal internal map() error (potentially OOM related).
|
||||
void NORETURN reportMapError(uptr SizeIfOOM) {
|
||||
char Error[128] = "Scudo ERROR: internal map failure\n";
|
||||
if (SizeIfOOM) {
|
||||
formatString(
|
||||
Error, sizeof(Error),
|
||||
"Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
|
||||
SizeIfOOM >> 10);
|
||||
}
|
||||
reportRawError(Error);
|
||||
}
|
||||
|
||||
void NORETURN reportUnmapError(uptr Addr, uptr Size) {
|
||||
char Error[128];
|
||||
formatString(Error, sizeof(Error),
|
||||
"Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
|
||||
"Size %zu\n",
|
||||
strerror(errno), Addr, Size);
|
||||
reportRawError(Error);
|
||||
}
|
||||
|
||||
void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
|
||||
char Error[128];
|
||||
formatString(
|
||||
Error, sizeof(Error),
|
||||
"Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
|
||||
"Size %zu Prot %x\n",
|
||||
strerror(errno), Addr, Size, Prot);
|
||||
reportRawError(Error);
|
||||
}
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_LINUX || SCUDO_TRUSTY
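These reporters all follow the same shape: format into a fixed stack buffer (no allocation, since the allocator may be the component that failed) and terminate. A generic restatement of that pattern outside scudo (reportFatal is illustrative, not part of the library):

#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Format a fixed-size message, print it, and die -- no heap involved.
[[noreturn]] static void reportFatal(const char *What, size_t Addr,
                                     size_t Size) {
  char Error[128];
  snprintf(Error, sizeof(Error), "ERROR: %s failed (%s) Addr 0x%zx Size %zu\n",
           What, strerror(errno), Addr, Size);
  fputs(Error, stderr);
  abort();
}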
|
34
Telegram/ThirdParty/scudo/report_linux.h
vendored
Normal file
|
@ -0,0 +1,34 @@
|
|||
//===-- report_linux.h ------------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_REPORT_LINUX_H_
|
||||
#define SCUDO_REPORT_LINUX_H_
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#if SCUDO_LINUX || SCUDO_TRUSTY
|
||||
|
||||
#include "internal_defs.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
// Report a fatal error when a map call fails. SizeIfOOM shall
|
||||
// hold the requested size on an out-of-memory error, 0 otherwise.
|
||||
void NORETURN reportMapError(uptr SizeIfOOM = 0);
|
||||
|
||||
// Report a fatal error when an unmap call fails.
|
||||
void NORETURN reportUnmapError(uptr Addr, uptr Size);
|
||||
|
||||
// Report a fatal error when a mprotect call fails.
|
||||
void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot);
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_LINUX || SCUDO_TRUSTY
|
||||
|
||||
#endif // SCUDO_REPORT_LINUX_H_
|
37
Telegram/ThirdParty/scudo/rss_limit_checker.cpp
vendored
|
@ -1,37 +0,0 @@
|
|||
//===-- common.cpp ----------------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "rss_limit_checker.h"
|
||||
#include "atomic_helpers.h"
|
||||
#include "string_utils.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
void RssLimitChecker::check(u64 NextCheck) {
|
||||
// The interval for the checks is 250ms.
|
||||
static constexpr u64 CheckInterval = 250 * 1000000;
|
||||
|
||||
// Early return in case another thread already did the calculation.
|
||||
if (!atomic_compare_exchange_strong(&RssNextCheckAtNS, &NextCheck,
|
||||
getMonotonicTime() + CheckInterval,
|
||||
memory_order_relaxed)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const uptr CurrentRssMb = GetRSS() >> 20;
|
||||
|
||||
RssLimitExceeded Result = RssLimitExceeded::Neither;
|
||||
if (UNLIKELY(HardRssLimitMb && HardRssLimitMb < CurrentRssMb))
|
||||
Result = RssLimitExceeded::Hard;
|
||||
else if (UNLIKELY(SoftRssLimitMb && SoftRssLimitMb < CurrentRssMb))
|
||||
Result = RssLimitExceeded::Soft;
|
||||
|
||||
atomic_store_relaxed(&RssLimitStatus, static_cast<u8>(Result));
|
||||
}
|
||||
|
||||
} // namespace scudo
|
63
Telegram/ThirdParty/scudo/rss_limit_checker.h
vendored
|
@ -1,63 +0,0 @@
|
|||
//===-- common.h ------------------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SCUDO_RSS_LIMIT_CHECKER_H_
|
||||
#define SCUDO_RSS_LIMIT_CHECKER_H_
|
||||
|
||||
#include "atomic_helpers.h"
|
||||
#include "common.h"
|
||||
#include "internal_defs.h"
|
||||
|
||||
namespace scudo {
|
||||
|
||||
class RssLimitChecker {
|
||||
public:
|
||||
enum RssLimitExceeded {
|
||||
Neither,
|
||||
Soft,
|
||||
Hard,
|
||||
};
|
||||
|
||||
void init(int SoftRssLimitMb, int HardRssLimitMb) {
|
||||
CHECK_GE(SoftRssLimitMb, 0);
|
||||
CHECK_GE(HardRssLimitMb, 0);
|
||||
this->SoftRssLimitMb = static_cast<uptr>(SoftRssLimitMb);
|
||||
this->HardRssLimitMb = static_cast<uptr>(HardRssLimitMb);
|
||||
}
|
||||
|
||||
// Opportunistic RSS limit check. This will update the RSS limit status, if
|
||||
// it can, every 250ms, otherwise it will just return the current one.
|
||||
RssLimitExceeded getRssLimitExceeded() {
|
||||
if (!HardRssLimitMb && !SoftRssLimitMb)
|
||||
return RssLimitExceeded::Neither;
|
||||
|
||||
u64 NextCheck = atomic_load_relaxed(&RssNextCheckAtNS);
|
||||
u64 Now = getMonotonicTime();
|
||||
|
||||
if (UNLIKELY(Now >= NextCheck))
|
||||
check(NextCheck);
|
||||
|
||||
return static_cast<RssLimitExceeded>(atomic_load_relaxed(&RssLimitStatus));
|
||||
}
|
||||
|
||||
uptr getSoftRssLimit() const { return SoftRssLimitMb; }
|
||||
uptr getHardRssLimit() const { return HardRssLimitMb; }
|
||||
|
||||
private:
|
||||
void check(u64 NextCheck);
|
||||
|
||||
uptr SoftRssLimitMb = 0;
|
||||
uptr HardRssLimitMb = 0;
|
||||
|
||||
atomic_u64 RssNextCheckAtNS = {};
|
||||
atomic_u8 RssLimitStatus = {};
|
||||
};
|
||||
|
||||
} // namespace scudo
|
||||
|
||||
#endif // SCUDO_RSS_LIMIT_CHECKER_H_
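For context on what is being deleted: the checker rate-limited the RSS probe with a relaxed compare-exchange on the next-check timestamp, so at most one thread per 250 ms interval paid for reading RSS. A condensed sketch of that pattern with std::atomic standing in for scudo's atomic helpers:

#include <atomic>
#include <cstdint>

// Only the thread that wins the compare-exchange re-reads RSS; everyone else
// keeps using the cached status until the next interval.
static std::atomic<uint64_t> NextCheckNs{0};
constexpr uint64_t CheckIntervalNs = 250ull * 1000 * 1000; // 250 ms

static bool shouldRecheck(uint64_t NowNs) {
  uint64_t Expected = NextCheckNs.load(std::memory_order_relaxed);
  if (NowNs < Expected)
    return false;
  // One winner advances the deadline; losers see the updated value and skip.
  return NextCheckNs.compare_exchange_strong(Expected, NowNs + CheckIntervalNs,
                                             std::memory_order_relaxed);
}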
|
186
Telegram/ThirdParty/scudo/secondary.h
vendored
|
@ -72,11 +72,26 @@ static inline void unmap(LargeBlock::Header *H) {
|
|||
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
|
||||
}
|
||||
|
||||
namespace {
|
||||
struct CachedBlock {
|
||||
uptr CommitBase = 0;
|
||||
uptr CommitSize = 0;
|
||||
uptr BlockBegin = 0;
|
||||
MemMapT MemMap = {};
|
||||
u64 Time = 0;
|
||||
|
||||
bool isValid() { return CommitBase != 0; }
|
||||
|
||||
void invalidate() { CommitBase = 0; }
|
||||
};
|
||||
} // namespace
|
||||
|
||||
template <typename Config> class MapAllocatorNoCache {
|
||||
public:
|
||||
void init(UNUSED s32 ReleaseToOsInterval) {}
|
||||
bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
|
||||
UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
|
||||
UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
|
||||
UNUSED bool *Zeroed) {
|
||||
return false;
|
||||
}
|
||||
void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
|
||||
|
@ -102,20 +117,22 @@ public:
|
|||
static const uptr MaxUnusedCachePages = 4U;
|
||||
|
||||
template <typename Config>
|
||||
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
|
||||
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
|
||||
uptr AllocPos, uptr Flags, MemMapT &MemMap) {
|
||||
Flags |= MAP_RESIZABLE;
|
||||
Flags |= MAP_ALLOWNOMEM;
|
||||
|
||||
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
|
||||
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
|
||||
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
|
||||
MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
|
||||
MAP_RESIZABLE | MAP_MEMTAG | Flags);
|
||||
MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
|
||||
"scudo:secondary", MAP_RESIZABLE | Flags);
|
||||
return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
|
||||
MAP_MEMTAG | Flags) &&
|
||||
MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
|
||||
"scudo:secondary", Flags);
|
||||
} else {
|
||||
const uptr RemapFlags =
|
||||
MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
|
||||
Flags;
|
||||
MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
|
||||
(useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
|
||||
return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -138,17 +155,24 @@ public:
|
|||
|
||||
void getStats(ScopedString *Str) {
|
||||
ScopedLock L(Mutex);
|
||||
uptr Integral;
|
||||
uptr Fractional;
|
||||
computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
|
||||
&Fractional);
|
||||
Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
|
||||
"MaxEntriesCount: %u, MaxEntrySize: %zu\n",
|
||||
EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
|
||||
atomic_load_relaxed(&MaxEntrySize));
|
||||
Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
|
||||
"(%zu.%02zu%%)\n",
|
||||
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
|
||||
for (CachedBlock Entry : Entries) {
|
||||
if (!Entry.CommitBase)
|
||||
if (!Entry.isValid())
|
||||
continue;
|
||||
Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
|
||||
"BlockSize: %zu\n",
|
||||
"BlockSize: %zu %s\n",
|
||||
Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
|
||||
Entry.CommitSize);
|
||||
Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -166,7 +190,7 @@ public:
|
|||
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
|
||||
}
|
||||
|
||||
void store(Options Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
|
||||
void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
|
||||
if (!canCache(H->CommitSize))
|
||||
return unmap(H);
|
||||
|
||||
|
@ -195,7 +219,7 @@ public:
|
|||
MAP_NOACCESS);
|
||||
}
|
||||
} else if (Interval == 0) {
|
||||
Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
|
||||
Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
|
||||
Entry.Time = 0;
|
||||
}
|
||||
do {
|
||||
|
@ -210,7 +234,7 @@ public:
|
|||
if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
|
||||
QuarantinePos =
|
||||
(QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
|
||||
if (!Quarantine[QuarantinePos].CommitBase) {
|
||||
if (!Quarantine[QuarantinePos].isValid()) {
|
||||
Quarantine[QuarantinePos] = Entry;
|
||||
return;
|
||||
}
|
||||
|
@ -225,7 +249,7 @@ public:
|
|||
EmptyCache = true;
|
||||
} else {
|
||||
for (u32 I = 0; I < MaxCount; I++) {
|
||||
if (Entries[I].CommitBase)
|
||||
if (Entries[I].isValid())
|
||||
continue;
|
||||
if (I != 0)
|
||||
Entries[I] = Entries[0];
|
||||
|
@ -246,26 +270,31 @@ public:
|
|||
Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
|
||||
}
|
||||
|
||||
bool retrieve(Options Options, uptr Size, uptr Alignment,
|
||||
bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
|
||||
LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
|
||||
// 10% of the requested size proved to be the optimal choice for
|
||||
// retrieving cached blocks after testing several options.
|
||||
constexpr u32 FragmentedBytesDivisor = 10;
|
||||
bool Found = false;
|
||||
CachedBlock Entry;
|
||||
uptr HeaderPos = 0;
|
||||
uptr EntryHeaderPos = 0;
|
||||
{
|
||||
ScopedLock L(Mutex);
|
||||
CallsToRetrieve++;
|
||||
if (EntriesCount == 0)
|
||||
return false;
|
||||
u32 OptimalFitIndex = 0;
|
||||
uptr MinDiff = UINTPTR_MAX;
|
||||
for (u32 I = 0; I < MaxCount; I++) {
|
||||
const uptr CommitBase = Entries[I].CommitBase;
|
||||
if (!CommitBase)
|
||||
if (!Entries[I].isValid())
|
||||
continue;
|
||||
const uptr CommitBase = Entries[I].CommitBase;
|
||||
const uptr CommitSize = Entries[I].CommitSize;
|
||||
const uptr AllocPos =
|
||||
roundDown(CommitBase + CommitSize - Size, Alignment);
|
||||
HeaderPos =
|
||||
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
|
||||
const uptr HeaderPos = AllocPos - HeadersSize;
|
||||
if (HeaderPos > CommitBase + CommitSize)
|
||||
continue;
|
||||
if (HeaderPos < CommitBase ||
|
||||
|
@ -273,17 +302,36 @@ public:
|
|||
continue;
|
||||
}
|
||||
Found = true;
|
||||
Entry = Entries[I];
|
||||
Entries[I].CommitBase = 0;
|
||||
const uptr Diff = HeaderPos - CommitBase;
|
||||
// Immediately use a cached block if its size is close enough to the
// requested size.
|
||||
const uptr MaxAllowedFragmentedBytes =
|
||||
(CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
|
||||
if (Diff <= MaxAllowedFragmentedBytes) {
|
||||
OptimalFitIndex = I;
|
||||
EntryHeaderPos = HeaderPos;
|
||||
break;
|
||||
}
|
||||
// Keep track of the smallest cached block
// that is greater than (AllocSize + HeaderSize).
|
||||
if (Diff > MinDiff)
|
||||
continue;
|
||||
OptimalFitIndex = I;
|
||||
MinDiff = Diff;
|
||||
EntryHeaderPos = HeaderPos;
|
||||
}
|
||||
if (Found) {
|
||||
Entry = Entries[OptimalFitIndex];
|
||||
Entries[OptimalFitIndex].invalidate();
|
||||
EntriesCount--;
|
||||
break;
|
||||
SuccessfulRetrieves++;
|
||||
}
|
||||
}
|
||||
if (!Found)
|
||||
return false;
|
||||
|
||||
*H = reinterpret_cast<LargeBlock::Header *>(
|
||||
LargeBlock::addHeaderTag<Config>(HeaderPos));
|
||||
LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
|
||||
*Zeroed = Entry.Time == 0;
|
||||
if (useMemoryTagging<Config>(Options))
|
||||
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
|
||||
|
@ -295,8 +343,7 @@ public:
|
|||
} else if (Entry.BlockBegin < NewBlockBegin) {
|
||||
storeTags(Entry.BlockBegin, NewBlockBegin);
|
||||
} else {
|
||||
storeTags(untagPointer(NewBlockBegin),
|
||||
untagPointer(Entry.BlockBegin));
|
||||
storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
|
||||
}
|
||||
}
|
||||
(*H)->CommitBase = Entry.CommitBase;
|
||||
|
@ -338,15 +385,15 @@ public:
|
|||
void disableMemoryTagging() EXCLUDES(Mutex) {
|
||||
ScopedLock L(Mutex);
|
||||
for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
|
||||
if (Quarantine[I].CommitBase) {
|
||||
if (Quarantine[I].isValid()) {
|
||||
MemMapT &MemMap = Quarantine[I].MemMap;
|
||||
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
|
||||
Quarantine[I].CommitBase = 0;
|
||||
Quarantine[I].invalidate();
|
||||
}
|
||||
}
|
||||
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
|
||||
for (u32 I = 0; I < MaxCount; I++) {
|
||||
if (Entries[I].CommitBase) {
|
||||
if (Entries[I].isValid()) {
|
||||
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
|
||||
Entries[I].CommitSize, 0);
|
||||
}
|
||||
|
@ -367,10 +414,10 @@ private:
|
|||
{
|
||||
ScopedLock L(Mutex);
|
||||
for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
|
||||
if (!Entries[I].CommitBase)
|
||||
if (!Entries[I].isValid())
|
||||
continue;
|
||||
MapInfo[N] = Entries[I].MemMap;
|
||||
Entries[I].CommitBase = 0;
|
||||
Entries[I].invalidate();
|
||||
N++;
|
||||
}
|
||||
EntriesCount = 0;
|
||||
|
@ -382,23 +429,15 @@ private:
|
|||
}
|
||||
}
|
||||
|
||||
struct CachedBlock {
|
||||
uptr CommitBase = 0;
|
||||
uptr CommitSize = 0;
|
||||
uptr BlockBegin = 0;
|
||||
MemMapT MemMap = {};
|
||||
u64 Time = 0;
|
||||
};
|
||||
|
||||
void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
|
||||
if (!Entry.CommitBase || !Entry.Time)
|
||||
if (!Entry.isValid() || !Entry.Time)
|
||||
return;
|
||||
if (Entry.Time > Time) {
|
||||
if (OldestTime == 0 || Entry.Time < OldestTime)
|
||||
OldestTime = Entry.Time;
|
||||
return;
|
||||
}
|
||||
Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
|
||||
Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
|
||||
Entry.Time = 0;
|
||||
}
|
||||
|
||||
|
@ -421,6 +460,8 @@ private:
|
|||
u64 OldestTime GUARDED_BY(Mutex) = 0;
|
||||
u32 IsFullEvents GUARDED_BY(Mutex) = 0;
|
||||
atomic_s32 ReleaseToOsIntervalMs = {};
|
||||
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
|
||||
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
|
||||
|
||||
CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
|
||||
NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
|
||||
|
@ -439,11 +480,11 @@ public:
|
|||
S->link(&Stats);
|
||||
}
|
||||
|
||||
void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
|
||||
void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
|
||||
uptr *BlockEnd = nullptr,
|
||||
FillContentsMode FillContents = NoFill);
|
||||
|
||||
void deallocate(Options Options, void *Ptr);
|
||||
void deallocate(const Options &Options, void *Ptr);
|
||||
|
||||
static uptr getBlockEnd(void *Ptr) {
|
||||
auto *B = LargeBlock::getHeader<Config>(Ptr);
|
||||
|
@ -454,6 +495,10 @@ public:
|
|||
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
|
||||
}
|
||||
|
||||
static constexpr uptr getHeadersSize() {
|
||||
return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
|
||||
}
|
||||
|
||||
void disable() NO_THREAD_SAFETY_ANALYSIS {
|
||||
Mutex.lock();
|
||||
Cache.disable();
|
||||
|
@ -494,6 +539,7 @@ private:
|
|||
DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
|
||||
uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
|
||||
uptr FreedBytes GUARDED_BY(Mutex) = 0;
|
||||
uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
|
||||
uptr LargestSize GUARDED_BY(Mutex) = 0;
|
||||
u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
|
||||
u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
|
||||
|
@ -512,24 +558,23 @@ private:
|
|||
// the committed memory will amount to something close to Size - AlignmentHint
|
||||
// (pending rounding and headers).
|
||||
template <typename Config>
|
||||
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
|
||||
uptr *BlockEndPtr,
|
||||
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
|
||||
uptr Alignment, uptr *BlockEndPtr,
|
||||
FillContentsMode FillContents) {
|
||||
if (Options.get(OptionBit::AddLargeAllocationSlack))
|
||||
Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
|
||||
Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
|
||||
const uptr PageSize = getPageSizeCached();
|
||||
uptr RoundedSize =
|
||||
roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
|
||||
Chunk::getHeaderSize(),
|
||||
PageSize);
|
||||
if (Alignment > PageSize)
|
||||
RoundedSize += Alignment - PageSize;
|
||||
|
||||
if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
|
||||
// Note that cached blocks may have aligned address already. Thus we simply
|
||||
// pass the required size (`Size` + `getHeadersSize()`) to do cache look up.
|
||||
const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
|
||||
|
||||
if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
|
||||
LargeBlock::Header *H;
|
||||
bool Zeroed;
|
||||
if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
|
||||
if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
|
||||
&Zeroed)) {
|
||||
const uptr BlockEnd = H->CommitBase + H->CommitSize;
|
||||
if (BlockEndPtr)
|
||||
*BlockEndPtr = BlockEnd;
|
||||
|
@ -545,6 +590,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
|
|||
ScopedLock L(Mutex);
|
||||
InUseBlocks.push_back(H);
|
||||
AllocatedBytes += H->CommitSize;
|
||||
FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
|
||||
NumberOfAllocs++;
|
||||
Stats.add(StatAllocated, H->CommitSize);
|
||||
Stats.add(StatMapped, H->MemMap.getCapacity());
|
||||
|
@ -553,16 +599,22 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
|
|||
}
|
||||
}
|
||||
|
||||
uptr RoundedSize =
|
||||
roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
|
||||
if (Alignment > PageSize)
|
||||
RoundedSize += Alignment - PageSize;
|
||||
|
||||
ReservedMemoryT ReservedMemory;
|
||||
const uptr MapSize = RoundedSize + 2 * PageSize;
|
||||
ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);
|
||||
if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
|
||||
MAP_ALLOWNOMEM))) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Take the entire ownership of reserved region.
|
||||
MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
|
||||
ReservedMemory.getCapacity());
|
||||
uptr MapBase = MemMap.getBase();
|
||||
if (UNLIKELY(!MapBase))
|
||||
return nullptr;
|
||||
uptr CommitBase = MapBase + PageSize;
|
||||
uptr MapEnd = MapBase + MapSize;
|
||||
|
||||
|
@ -592,9 +644,12 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
|
|||
|
||||
const uptr CommitSize = MapEnd - PageSize - CommitBase;
|
||||
const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
|
||||
mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
|
||||
const uptr HeaderPos =
|
||||
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
|
||||
if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
|
||||
MemMap)) {
|
||||
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
|
||||
return nullptr;
|
||||
}
|
||||
const uptr HeaderPos = AllocPos - getHeadersSize();
|
||||
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
|
||||
LargeBlock::addHeaderTag<Config>(HeaderPos));
|
||||
if (useMemoryTagging<Config>(Options))
|
||||
|
@ -609,6 +664,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
|
|||
ScopedLock L(Mutex);
|
||||
InUseBlocks.push_back(H);
|
||||
AllocatedBytes += CommitSize;
|
||||
FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
|
||||
if (LargestSize < CommitSize)
|
||||
LargestSize = CommitSize;
|
||||
NumberOfAllocs++;
|
||||
|
@ -619,7 +675,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
|
|||
}
|
||||
|
||||
template <typename Config>
|
||||
void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
|
||||
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
|
||||
EXCLUDES(Mutex) {
|
||||
LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
|
||||
const uptr CommitSize = H->CommitSize;
|
||||
|
@ -627,6 +683,7 @@ void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
|
|||
ScopedLock L(Mutex);
|
||||
InUseBlocks.remove(H);
|
||||
FreedBytes += CommitSize;
|
||||
FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
|
||||
NumberOfFrees++;
|
||||
Stats.sub(StatAllocated, CommitSize);
|
||||
Stats.sub(StatMapped, H->MemMap.getCapacity());
|
||||
|
@ -638,10 +695,11 @@ template <typename Config>
|
|||
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
|
||||
ScopedLock L(Mutex);
|
||||
Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
|
||||
"(%zuK), remains %u (%zuK) max %zuM\n",
|
||||
"(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
|
||||
NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
|
||||
FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
|
||||
(AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
|
||||
(AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
|
||||
FragmentedBytes >> 10);
|
||||
Cache.getStats(Str);
|
||||
}
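Note on the retrieve() changes above: the cache now scans for an "optimal fit" instead of taking the first block that happens to be large enough. An entry is used immediately when the bytes wasted in front of the header are at most about one tenth of its usable span; otherwise the scan keeps the entry with the smallest waste. A minimal standalone sketch of that selection rule, with simplified types and field names and a power-of-two Alignment assumed (an illustration, not the scudo implementation):

#include <cstddef>
#include <cstdint>
#include <vector>

struct CachedBlock {
  uintptr_t CommitBase = 0;
  uintptr_t CommitSize = 0;
  bool isValid() const { return CommitBase != 0; }
};

// Returns the index of the chosen entry, or -1 if nothing fits.
int findOptimalFit(const std::vector<CachedBlock> &Entries, uintptr_t Size,
                   uintptr_t Alignment, uintptr_t HeadersSize) {
  constexpr uintptr_t FragmentedBytesDivisor = 10; // accept <= ~10% waste right away
  int OptimalFitIndex = -1;
  uintptr_t MinDiff = UINTPTR_MAX;
  for (size_t I = 0; I < Entries.size(); I++) {
    if (!Entries[I].isValid())
      continue;
    const uintptr_t CommitBase = Entries[I].CommitBase;
    const uintptr_t CommitSize = Entries[I].CommitSize;
    // roundDown(CommitBase + CommitSize - Size, Alignment); Alignment is a power of two.
    const uintptr_t AllocPos = (CommitBase + CommitSize - Size) & ~(Alignment - 1);
    const uintptr_t HeaderPos = AllocPos - HeadersSize;
    if (HeaderPos > CommitBase + CommitSize || HeaderPos < CommitBase)
      continue; // block too small (or the subtraction wrapped around)
    const uintptr_t Diff = HeaderPos - CommitBase; // bytes left unused at the front
    const uintptr_t MaxAllowedFragmentedBytes =
        (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
    if (Diff <= MaxAllowedFragmentedBytes)
      return static_cast<int>(I); // close enough: take it immediately
    if (Diff < MinDiff) {         // otherwise remember the tightest fit so far
      MinDiff = Diff;
      OptimalFitIndex = static_cast<int>(I);
    }
  }
  return OptimalFitIndex;
}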
|
||||
|
||||
|
|
26  Telegram/ThirdParty/scudo/size_class_map.h  vendored
|
@ -254,7 +254,7 @@ struct AndroidSizeClassConfig {
|
|||
static const u16 MaxNumCachedHint = 13;
|
||||
static const uptr MaxBytesCachedLog = 13;
|
||||
|
||||
static constexpr u32 Classes[] = {
|
||||
static constexpr uptr Classes[] = {
|
||||
0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
|
||||
0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
|
||||
0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
|
||||
|
@ -269,7 +269,7 @@ struct AndroidSizeClassConfig {
|
|||
static const u16 MaxNumCachedHint = 14;
|
||||
static const uptr MaxBytesCachedLog = 13;
|
||||
|
||||
static constexpr u32 Classes[] = {
|
||||
static constexpr uptr Classes[] = {
|
||||
0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
|
||||
0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
|
||||
0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
|
||||
|
@ -289,28 +289,6 @@ typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
|
|||
static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
|
||||
#endif
|
||||
|
||||
struct SvelteSizeClassConfig {
|
||||
#if SCUDO_WORDSIZE == 64U
|
||||
static const uptr NumBits = 4;
|
||||
static const uptr MinSizeLog = 4;
|
||||
static const uptr MidSizeLog = 8;
|
||||
static const uptr MaxSizeLog = 14;
|
||||
static const u16 MaxNumCachedHint = 13;
|
||||
static const uptr MaxBytesCachedLog = 10;
|
||||
static const uptr SizeDelta = Chunk::getHeaderSize();
|
||||
#else
|
||||
static const uptr NumBits = 4;
|
||||
static const uptr MinSizeLog = 3;
|
||||
static const uptr MidSizeLog = 7;
|
||||
static const uptr MaxSizeLog = 14;
|
||||
static const u16 MaxNumCachedHint = 14;
|
||||
static const uptr MaxBytesCachedLog = 10;
|
||||
static const uptr SizeDelta = Chunk::getHeaderSize();
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap;
|
||||
|
||||
struct TrustySizeClassConfig {
|
||||
static const uptr NumBits = 1;
|
||||
static const uptr MinSizeLog = 5;
|
||||
|
|
5  Telegram/ThirdParty/scudo/stack_depot.h  vendored
|
@ -62,8 +62,7 @@ class StackDepot {
|
|||
// This is achieved by re-checking the hash of the stack trace before
|
||||
// returning the trace.
|
||||
|
||||
#ifdef SCUDO_FUZZ
|
||||
// Use smaller table sizes for fuzzing in order to reduce input size.
|
||||
#if SCUDO_SMALL_STACK_DEPOT
|
||||
static const uptr TabBits = 4;
|
||||
#else
|
||||
static const uptr TabBits = 16;
|
||||
|
@ -72,7 +71,7 @@ class StackDepot {
|
|||
static const uptr TabMask = TabSize - 1;
|
||||
atomic_u32 Tab[TabSize] = {};
|
||||
|
||||
#ifdef SCUDO_FUZZ
|
||||
#if SCUDO_SMALL_STACK_DEPOT
|
||||
static const uptr RingBits = 4;
|
||||
#else
|
||||
static const uptr RingBits = 19;
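Aside on the change above: SCUDO_FUZZ is replaced by a dedicated SCUDO_SMALL_STACK_DEPOT switch, but the sizing logic itself is untouched because the depot tables are plain power-of-two arrays indexed by masking a hash, so only the bit counts need to shrink. A tiny illustrative sketch with simplified names (not scudo's actual class):

#include <cstdint>

template <uint32_t TabBits> struct TinyDepotTable {
  static constexpr uint32_t TabSize = 1u << TabBits;
  static constexpr uint32_t TabMask = TabSize - 1;
  uint32_t Tab[TabSize] = {};

  // Power-of-two size means indexing is a single mask, whatever TabBits is.
  uint32_t &slotFor(uint32_t Hash) { return Tab[Hash & TabMask]; }
};

// TinyDepotTable<16> roughly matches the default (65536 slots);
// TinyDepotTable<4> matches the small configuration used for fuzzing (16 slots).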
|
||||
|
|
15  Telegram/ThirdParty/scudo/tests/CMakeLists.txt  vendored
|
@ -15,11 +15,15 @@ set(SCUDO_UNITTEST_CFLAGS
|
|||
-DGTEST_HAS_RTTI=0
|
||||
-g
|
||||
# Extra flags for the C++ tests
|
||||
-Wconversion
|
||||
# TODO(kostyak): find a way to make -fsized-deallocation work
|
||||
-Wno-mismatched-new-delete)
|
||||
|
||||
if(COMPILER_RT_DEBUG)
|
||||
list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_DEBUG=1)
|
||||
list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_DEBUG=1 -DSCUDO_ENABLE_HOOKS=1)
|
||||
if (NOT FUCHSIA)
|
||||
list(APPEND SCUDO_UNITTEST_CFLAGS -DSCUDO_ENABLE_HOOKS_TESTS=1)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(ANDROID)
|
||||
|
@ -92,6 +96,7 @@ set(SCUDO_UNIT_TEST_SOURCES
|
|||
chunk_test.cpp
|
||||
combined_test.cpp
|
||||
common_test.cpp
|
||||
condition_variable_test.cpp
|
||||
flags_test.cpp
|
||||
list_test.cpp
|
||||
map_test.cpp
|
||||
|
@ -137,11 +142,3 @@ set(SCUDO_CXX_UNIT_TEST_SOURCES
|
|||
add_scudo_unittest(ScudoCxxUnitTest
|
||||
SOURCES ${SCUDO_CXX_UNIT_TEST_SOURCES}
|
||||
ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers RTScudoStandaloneCxxWrappers)
|
||||
|
||||
set(SCUDO_HOOKS_UNIT_TEST_SOURCES
|
||||
scudo_hooks_test.cpp
|
||||
scudo_unit_test_main.cpp
|
||||
)
|
||||
|
||||
add_scudo_unittest(ScudoHooksUnitTest
|
||||
SOURCES ${SCUDO_HOOKS_UNIT_TEST_SOURCES})
|
||||
|
|
23  Telegram/ThirdParty/scudo/tests/chunk_test.cpp  vendored
|
@ -37,29 +37,6 @@ TEST(ScudoChunkDeathTest, ChunkBasic) {
|
|||
free(Block);
|
||||
}
|
||||
|
||||
TEST(ScudoChunkTest, ChunkCmpXchg) {
|
||||
initChecksum();
|
||||
const scudo::uptr Size = 0x100U;
|
||||
scudo::Chunk::UnpackedHeader OldHeader = {};
|
||||
OldHeader.OriginOrWasZeroed = scudo::Chunk::Origin::Malloc;
|
||||
OldHeader.ClassId = 0x42U;
|
||||
OldHeader.SizeOrUnusedBytes = Size;
|
||||
OldHeader.State = scudo::Chunk::State::Allocated;
|
||||
void *Block = malloc(HeaderSize + Size);
|
||||
void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
|
||||
HeaderSize);
|
||||
scudo::Chunk::storeHeader(Cookie, P, &OldHeader);
|
||||
memset(P, 'A', Size);
|
||||
scudo::Chunk::UnpackedHeader NewHeader = OldHeader;
|
||||
NewHeader.State = scudo::Chunk::State::Quarantined;
|
||||
scudo::Chunk::compareExchangeHeader(Cookie, P, &NewHeader, &OldHeader);
|
||||
NewHeader = {};
|
||||
EXPECT_TRUE(scudo::Chunk::isValid(Cookie, P, &NewHeader));
|
||||
EXPECT_EQ(NewHeader.State, scudo::Chunk::State::Quarantined);
|
||||
EXPECT_FALSE(scudo::Chunk::isValid(InvalidCookie, P, &NewHeader));
|
||||
free(Block);
|
||||
}
|
||||
|
||||
TEST(ScudoChunkDeathTest, CorruptHeader) {
|
||||
initChecksum();
|
||||
const scudo::uptr Size = 0x100U;
|
||||
|
|
188  Telegram/ThirdParty/scudo/tests/combined_test.cpp  vendored
|
@ -12,8 +12,11 @@
|
|||
#include "allocator_config.h"
|
||||
#include "chunk.h"
|
||||
#include "combined.h"
|
||||
#include "condition_variable.h"
|
||||
#include "mem_map.h"
|
||||
#include "size_class_map.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <condition_variable>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
@ -54,7 +57,7 @@ void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
|
|||
EXPECT_DEATH(
|
||||
{
|
||||
disableDebuggerdMaybe();
|
||||
reinterpret_cast<char *>(P)[-1] = 0xaa;
|
||||
reinterpret_cast<char *>(P)[-1] = 'A';
|
||||
},
|
||||
"");
|
||||
if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
|
||||
|
@ -63,7 +66,7 @@ void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
|
|||
EXPECT_DEATH(
|
||||
{
|
||||
disableDebuggerdMaybe();
|
||||
reinterpret_cast<char *>(P)[Size] = 0xaa;
|
||||
reinterpret_cast<char *>(P)[Size] = 'A';
|
||||
},
|
||||
"");
|
||||
}
|
||||
|
@ -78,14 +81,70 @@ template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
|
|||
}
|
||||
~TestAllocator() { this->unmapTestOnly(); }
|
||||
|
||||
void *operator new(size_t size) {
|
||||
void *p = nullptr;
|
||||
EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
|
||||
return p;
|
||||
void *operator new(size_t size);
|
||||
void operator delete(void *ptr);
|
||||
};
|
||||
|
||||
constexpr size_t kMaxAlign = std::max({
|
||||
alignof(scudo::Allocator<scudo::DefaultConfig>),
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
alignof(scudo::Allocator<scudo::FuchsiaConfig>),
|
||||
#endif
|
||||
alignof(scudo::Allocator<scudo::AndroidConfig>)
|
||||
});
|
||||
|
||||
#if SCUDO_RISCV64
|
||||
// The allocator is over 4MB large. Rather than creating an instance of this on
|
||||
// the heap, keep it in a global storage to reduce fragmentation from having to
|
||||
// mmap this at the start of every test.
|
||||
struct TestAllocatorStorage {
|
||||
static constexpr size_t kMaxSize = std::max({
|
||||
sizeof(scudo::Allocator<scudo::DefaultConfig>),
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
|
||||
#endif
|
||||
sizeof(scudo::Allocator<scudo::AndroidConfig>)
|
||||
});
|
||||
|
||||
// To alleviate some problem, let's skip the thread safety analysis here.
|
||||
static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
|
||||
CHECK(size <= kMaxSize &&
|
||||
"Allocation size doesn't fit in the allocator storage");
|
||||
M.lock();
|
||||
return AllocatorStorage;
|
||||
}
|
||||
|
||||
void operator delete(void *ptr) { free(ptr); }
|
||||
static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
|
||||
M.assertHeld();
|
||||
M.unlock();
|
||||
ASSERT_EQ(ptr, AllocatorStorage);
|
||||
}
|
||||
|
||||
static scudo::HybridMutex M;
|
||||
static uint8_t AllocatorStorage[kMaxSize];
|
||||
};
|
||||
scudo::HybridMutex TestAllocatorStorage::M;
|
||||
alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
|
||||
#else
|
||||
struct TestAllocatorStorage {
|
||||
static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
|
||||
void *p = nullptr;
|
||||
EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
|
||||
return p;
|
||||
}
|
||||
static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
|
||||
};
|
||||
#endif
|
||||
|
||||
template <typename Config>
|
||||
void *TestAllocator<Config>::operator new(size_t size) {
|
||||
return TestAllocatorStorage::get(size);
|
||||
}
|
||||
|
||||
template <typename Config>
|
||||
void TestAllocator<Config>::operator delete(void *ptr) {
|
||||
TestAllocatorStorage::release(ptr);
|
||||
}
|
||||
|
||||
template <class TypeParam> struct ScudoCombinedTest : public Test {
|
||||
ScudoCombinedTest() {
|
||||
|
@ -107,15 +166,60 @@ template <class TypeParam> struct ScudoCombinedTest : public Test {
|
|||
|
||||
template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;
|
||||
|
||||
namespace scudo {
|
||||
struct TestConditionVariableConfig {
|
||||
static const bool MaySupportMemoryTagging = true;
|
||||
template <class A>
|
||||
using TSDRegistryT =
|
||||
scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
|
||||
|
||||
struct Primary {
|
||||
using SizeClassMap = scudo::AndroidSizeClassMap;
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
static const scudo::uptr RegionSizeLog = 28U;
|
||||
typedef scudo::u32 CompactPtrT;
|
||||
static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
|
||||
static const scudo::uptr GroupSizeLog = 20U;
|
||||
static const bool EnableRandomOffset = true;
|
||||
static const scudo::uptr MapSizeIncrement = 1UL << 18;
|
||||
#else
|
||||
static const scudo::uptr RegionSizeLog = 18U;
|
||||
static const scudo::uptr GroupSizeLog = 18U;
|
||||
typedef scudo::uptr CompactPtrT;
|
||||
#endif
|
||||
static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
|
||||
static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
|
||||
static const bool UseConditionVariable = true;
|
||||
#if SCUDO_LINUX
|
||||
using ConditionVariableT = scudo::ConditionVariableLinux;
|
||||
#else
|
||||
using ConditionVariableT = scudo::ConditionVariableDummy;
|
||||
#endif
|
||||
};
|
||||
#if SCUDO_CAN_USE_PRIMARY64
|
||||
template <typename Config>
|
||||
using PrimaryT = scudo::SizeClassAllocator64<Config>;
|
||||
#else
|
||||
template <typename Config>
|
||||
using PrimaryT = scudo::SizeClassAllocator32<Config>;
|
||||
#endif
|
||||
|
||||
struct Secondary {
|
||||
template <typename Config>
|
||||
using CacheT = scudo::MapAllocatorNoCache<Config>;
|
||||
};
|
||||
template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
|
||||
};
|
||||
} // namespace scudo
|
||||
|
||||
#if SCUDO_FUCHSIA
|
||||
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
|
||||
#else
|
||||
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
|
||||
#endif
|
||||
|
||||
#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \
|
||||
|
@ -170,6 +274,7 @@ void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
|
|||
}
|
||||
|
||||
Allocator->printStats();
|
||||
Allocator->printFragmentationInfo();
|
||||
}
|
||||
|
||||
#define SCUDO_MAKE_BASIC_TEST(SizeLog) \
|
||||
|
@ -209,7 +314,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
|
|||
void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
|
||||
EXPECT_NE(P, nullptr);
|
||||
for (scudo::uptr I = 0; I < Size; I++)
|
||||
ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
|
||||
ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
|
||||
memset(P, 0xaa, Size);
|
||||
Allocator->deallocate(P, Origin, Size);
|
||||
}
|
||||
|
@ -227,7 +332,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
|
|||
void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
|
||||
EXPECT_NE(P, nullptr);
|
||||
for (scudo::uptr I = 0; I < Size; I++)
|
||||
ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
|
||||
ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
|
||||
memset(P, 0xaa, Size);
|
||||
Allocator->deallocate(P, Origin, Size);
|
||||
}
|
||||
|
@ -286,7 +391,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
|
|||
// we preserve the data in the process.
|
||||
scudo::uptr Size = 16;
|
||||
void *P = Allocator->allocate(Size, Origin);
|
||||
const char Marker = 0xab;
|
||||
const char Marker = 'A';
|
||||
memset(P, Marker, Size);
|
||||
while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
|
||||
void *NewP = Allocator->reallocate(P, Size * 2);
|
||||
|
@ -308,7 +413,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
|
|||
scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
|
||||
const scudo::uptr DataSize = 2048U;
|
||||
void *P = Allocator->allocate(Size, Origin);
|
||||
const char Marker = 0xab;
|
||||
const char Marker = 'A';
|
||||
memset(P, Marker, scudo::Min(Size, DataSize));
|
||||
while (Size > 1U) {
|
||||
Size /= 2U;
|
||||
|
@ -331,7 +436,7 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
|
|||
constexpr scudo::uptr ReallocSize =
|
||||
TypeParam::Primary::SizeClassMap::MaxSize - 64;
|
||||
void *P = Allocator->allocate(ReallocSize, Origin);
|
||||
const char Marker = 0xab;
|
||||
const char Marker = 'A';
|
||||
memset(P, Marker, ReallocSize);
|
||||
for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
|
||||
const scudo::uptr NewSize =
|
||||
|
@ -388,7 +493,7 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
|
|||
disableDebuggerdMaybe();
|
||||
void *P = Allocator->allocate(Size, Origin);
|
||||
Allocator->deallocate(P, Origin);
|
||||
reinterpret_cast<char *>(P)[0] = 0xaa;
|
||||
reinterpret_cast<char *>(P)[0] = 'A';
|
||||
},
|
||||
"");
|
||||
EXPECT_DEATH(
|
||||
|
@ -396,7 +501,7 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
|
|||
disableDebuggerdMaybe();
|
||||
void *P = Allocator->allocate(Size, Origin);
|
||||
Allocator->deallocate(P, Origin);
|
||||
reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
|
||||
reinterpret_cast<char *>(P)[Size - 1] = 'A';
|
||||
},
|
||||
"");
|
||||
}
|
||||
|
@ -408,15 +513,15 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
|
|||
if (Allocator->useMemoryTaggingTestOnly()) {
|
||||
// Check that disabling memory tagging works correctly.
|
||||
void *P = Allocator->allocate(2048, Origin);
|
||||
EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
|
||||
EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
|
||||
scudo::ScopedDisableMemoryTagChecks NoTagChecks;
|
||||
Allocator->disableMemoryTagging();
|
||||
reinterpret_cast<char *>(P)[2048] = 0xaa;
|
||||
reinterpret_cast<char *>(P)[2048] = 'A';
|
||||
Allocator->deallocate(P, Origin);
|
||||
|
||||
P = Allocator->allocate(2048, Origin);
|
||||
EXPECT_EQ(scudo::untagPointer(P), P);
|
||||
reinterpret_cast<char *>(P)[2048] = 0xaa;
|
||||
reinterpret_cast<char *>(P)[2048] = 'A';
|
||||
Allocator->deallocate(P, Origin);
|
||||
|
||||
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
|
||||
|
@ -456,6 +561,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
|
|||
|
||||
bool UnlockRequired;
|
||||
auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
EXPECT_TRUE(!TSD->getCache().isEmpty());
|
||||
TSD->getCache().drain();
|
||||
EXPECT_TRUE(TSD->getCache().isEmpty());
|
||||
|
@ -480,6 +586,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
|
|||
|
||||
bool UnlockRequired;
|
||||
auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
EXPECT_TRUE(TSD->getCache().isEmpty());
|
||||
EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
|
||||
EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
|
||||
|
@ -723,7 +830,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
|
|||
for (unsigned I = 0; I != Ptrs.size(); ++I) {
|
||||
Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
|
||||
for (scudo::uptr J = 0; J < Size; ++J)
|
||||
ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
|
||||
ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -786,6 +893,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
|
|||
|
||||
bool UnlockRequired;
|
||||
auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
TSD->getCache().drain();
|
||||
|
||||
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
|
||||
|
@ -793,41 +901,3 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
|
|||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if SCUDO_LINUX
|
||||
|
||||
SCUDO_TYPED_TEST(ScudoCombinedTest, SoftRssLimit) {
|
||||
auto *Allocator = this->Allocator.get();
|
||||
Allocator->setRssLimitsTestOnly(1, 0, true);
|
||||
|
||||
size_t Megabyte = 1024 * 1024;
|
||||
size_t ChunkSize = 16;
|
||||
size_t Error = 256;
|
||||
|
||||
std::vector<void *> Ptrs;
|
||||
for (size_t index = 0; index < Megabyte + Error; index += ChunkSize) {
|
||||
void *Ptr = Allocator->allocate(ChunkSize, Origin);
|
||||
Ptrs.push_back(Ptr);
|
||||
}
|
||||
|
||||
EXPECT_EQ(nullptr, Allocator->allocate(ChunkSize, Origin));
|
||||
|
||||
for (void *Ptr : Ptrs)
|
||||
Allocator->deallocate(Ptr, Origin);
|
||||
}
|
||||
|
||||
SCUDO_TYPED_TEST(ScudoCombinedTest, HardRssLimit) {
|
||||
auto *Allocator = this->Allocator.get();
|
||||
Allocator->setRssLimitsTestOnly(0, 1, false);
|
||||
|
||||
size_t Megabyte = 1024 * 1024;
|
||||
|
||||
EXPECT_DEATH(
|
||||
{
|
||||
disableDebuggerdMaybe();
|
||||
Allocator->allocate(Megabyte, Origin);
|
||||
},
|
||||
"");
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
23  Telegram/ThirdParty/scudo/tests/common_test.cpp  vendored
|
@ -72,27 +72,4 @@ TEST(ScudoCommonTest, Zeros) {
|
|||
MemMap.unmap(MemMap.getBase(), Size);
|
||||
}
|
||||
|
||||
#if 0
|
||||
// This test is temporarily disabled because it may not work as expected. E.g.,
// it doesn't dirty the pages, so the pages may not be committed, and it may
// only work in a single-threaded environment. As a result, this test is flaky
// and is impacting many test scenarios.
|
||||
TEST(ScudoCommonTest, GetRssFromBuffer) {
|
||||
constexpr int64_t AllocSize = 10000000;
|
||||
constexpr int64_t Error = 3000000;
|
||||
constexpr size_t Runs = 10;
|
||||
|
||||
int64_t Rss = scudo::GetRSS();
|
||||
EXPECT_GT(Rss, 0);
|
||||
|
||||
std::vector<std::unique_ptr<char[]>> Allocs(Runs);
|
||||
for (auto &Alloc : Allocs) {
|
||||
Alloc.reset(new char[AllocSize]());
|
||||
int64_t Prev = Rss;
|
||||
Rss = scudo::GetRSS();
|
||||
EXPECT_LE(std::abs(Rss - AllocSize - Prev), Error);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace scudo
|
||||
|
|
59  Telegram/ThirdParty/scudo/tests/condition_variable_test.cpp  vendored  Normal file
|
@ -0,0 +1,59 @@
|
|||
//===-- condition_variable_test.cpp -----------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "tests/scudo_unit_test.h"
|
||||
|
||||
#include "common.h"
|
||||
#include "condition_variable.h"
|
||||
#include "mutex.h"
|
||||
|
||||
#include <thread>
|
||||
|
||||
template <typename ConditionVariableT> void simpleWaitAndNotifyAll() {
|
||||
constexpr scudo::u32 NumThreads = 2;
|
||||
constexpr scudo::u32 CounterMax = 1024;
|
||||
std::thread Threads[NumThreads];
|
||||
|
||||
scudo::HybridMutex M;
|
||||
ConditionVariableT CV;
|
||||
CV.bindTestOnly(M);
|
||||
scudo::u32 Counter = 0;
|
||||
|
||||
for (scudo::u32 I = 0; I < NumThreads; ++I) {
|
||||
Threads[I] = std::thread(
|
||||
[&](scudo::u32 Id) {
|
||||
do {
|
||||
scudo::ScopedLock L(M);
|
||||
if (Counter % NumThreads != Id && Counter < CounterMax)
|
||||
CV.wait(M);
|
||||
if (Counter >= CounterMax) {
|
||||
break;
|
||||
} else {
|
||||
++Counter;
|
||||
CV.notifyAll(M);
|
||||
}
|
||||
} while (true);
|
||||
},
|
||||
I);
|
||||
}
|
||||
|
||||
for (std::thread &T : Threads)
|
||||
T.join();
|
||||
|
||||
EXPECT_EQ(Counter, CounterMax);
|
||||
}
|
||||
|
||||
TEST(ScudoConditionVariableTest, DummyCVWaitAndNotifyAll) {
|
||||
simpleWaitAndNotifyAll<scudo::ConditionVariableDummy>();
|
||||
}
|
||||
|
||||
#ifdef SCUDO_LINUX
|
||||
TEST(ScudoConditionVariableTest, LinuxCVWaitAndNotifyAll) {
|
||||
simpleWaitAndNotifyAll<scudo::ConditionVariableLinux>();
|
||||
}
|
||||
#endif
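The new test above drives scudo's condition-variable abstraction with a simple round-robin counter. For readers more used to the standard library, the same wait/notify-all pattern looks like this with std::condition_variable (an equivalent sketch, not scudo code):

#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  constexpr unsigned NumThreads = 2;
  constexpr unsigned CounterMax = 1024;
  std::mutex M;
  std::condition_variable CV;
  unsigned Counter = 0;

  std::thread Threads[NumThreads];
  for (unsigned I = 0; I < NumThreads; ++I) {
    Threads[I] = std::thread(
        [&](unsigned Id) {
          std::unique_lock<std::mutex> L(M);
          while (Counter < CounterMax) {
            // Each thread only advances the counter on its own turn.
            CV.wait(L, [&] {
              return Counter % NumThreads == Id || Counter >= CounterMax;
            });
            if (Counter < CounterMax) {
              ++Counter;
              CV.notify_all();
            }
          }
        },
        I);
  }
  for (std::thread &T : Threads)
    T.join();
  assert(Counter == CounterMax);
  return 0;
}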
|
10  Telegram/ThirdParty/scudo/tests/memtag_test.cpp  vendored
|
@ -120,7 +120,15 @@ TEST_F(MemtagTest, SelectRandomTag) {
|
|||
uptr Tags = 0;
|
||||
for (uptr I = 0; I < 100000; ++I)
|
||||
Tags = Tags | (1u << extractTag(selectRandomTag(Ptr, 0)));
|
||||
EXPECT_EQ(0xfffeull, Tags);
|
||||
// std::popcount is C++20
|
||||
int PopCnt = 0;
|
||||
while (Tags) {
|
||||
PopCnt += Tags & 1;
|
||||
Tags >>= 1;
|
||||
}
|
||||
// Random tags are not always very random, and this test is not about PRNG
|
||||
// quality. Anything above half would be satisfactory.
|
||||
EXPECT_GE(PopCnt, 8);
|
||||
}
|
||||
}
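The hand-rolled loop above exists only because std::popcount is C++20; the two are equivalent. A small illustrative equivalence check, separate from the test suite:

#include <bit>
#include <cassert>
#include <cstdint>

// Same bit-counting loop as in the test above, extracted as a helper.
int manualPopCnt(uint64_t Tags) {
  int PopCnt = 0;
  while (Tags) {
    PopCnt += Tags & 1;
    Tags >>= 1;
  }
  return PopCnt;
}

int main() {
  const uint64_t Tags = 0xfffeull; // 15 bits set
  assert(manualPopCnt(Tags) == std::popcount(Tags));
  assert(manualPopCnt(Tags) == 15);
  return 0;
}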
|
||||
|
||||
|
|
43  Telegram/ThirdParty/scudo/tests/primary_test.cpp  vendored
|
@ -8,6 +8,8 @@
|
|||
|
||||
#include "tests/scudo_unit_test.h"
|
||||
|
||||
#include "allocator_config.h"
|
||||
#include "condition_variable.h"
|
||||
#include "primary32.h"
|
||||
#include "primary64.h"
|
||||
#include "size_class_map.h"
|
||||
|
@ -104,6 +106,34 @@ template <typename SizeClassMapT> struct TestConfig4 {
|
|||
};
|
||||
};
|
||||
|
||||
// This is the only test config that enables the condition variable.
|
||||
template <typename SizeClassMapT> struct TestConfig5 {
|
||||
static const bool MaySupportMemoryTagging = true;
|
||||
|
||||
struct Primary {
|
||||
using SizeClassMap = SizeClassMapT;
|
||||
#if defined(__mips__)
|
||||
// Unable to allocate greater size on QEMU-user.
|
||||
static const scudo::uptr RegionSizeLog = 23U;
|
||||
#else
|
||||
static const scudo::uptr RegionSizeLog = 24U;
|
||||
#endif
|
||||
static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
|
||||
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
|
||||
static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
|
||||
static const scudo::uptr GroupSizeLog = 18U;
|
||||
typedef scudo::u32 CompactPtrT;
|
||||
static const bool EnableRandomOffset = true;
|
||||
static const scudo::uptr MapSizeIncrement = 1UL << 18;
|
||||
static const bool UseConditionVariable = true;
|
||||
#if SCUDO_LINUX
|
||||
using ConditionVariableT = scudo::ConditionVariableLinux;
|
||||
#else
|
||||
using ConditionVariableT = scudo::ConditionVariableDummy;
|
||||
#endif
|
||||
};
|
||||
};
|
||||
|
||||
template <template <typename> class BaseConfig, typename SizeClassMapT>
|
||||
struct Config : public BaseConfig<SizeClassMapT> {};
|
||||
|
||||
|
@ -142,7 +172,8 @@ struct ScudoPrimaryTest : public Test {};
|
|||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4) \
|
||||
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
|
||||
#endif
|
||||
|
||||
#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \
|
||||
|
@ -206,7 +237,7 @@ struct SmallRegionsConfig {
|
|||
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
|
||||
TEST(ScudoPrimaryTest, Primary64OOM) {
|
||||
using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
|
||||
using TransferBatch = Primary::CacheT::TransferBatch;
|
||||
using TransferBatch = Primary::TransferBatchT;
|
||||
Primary Allocator;
|
||||
Allocator.init(/*ReleaseToOsInterval=*/-1);
|
||||
typename Primary::CacheT Cache;
|
||||
|
@ -232,8 +263,9 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
|
|||
while (!Batches.empty()) {
|
||||
TransferBatch *B = Batches.back();
|
||||
Batches.pop_back();
|
||||
B->copyToArray(Blocks);
|
||||
Allocator.pushBlocks(&Cache, ClassId, Blocks, B->getCount());
|
||||
const scudo::u16 Count = B->getCount();
|
||||
B->moveToArray(Blocks);
|
||||
Allocator.pushBlocks(&Cache, ClassId, Blocks, Count);
|
||||
Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
|
||||
}
|
||||
Cache.destroy(nullptr);
|
||||
|
@ -283,7 +315,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
|
|||
}
|
||||
|
||||
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
|
||||
using Primary = TestAllocator<TypeParam, scudo::SvelteSizeClassMap>;
|
||||
using Primary = TestAllocator<TypeParam, scudo::Config::Primary::SizeClassMap>;
|
||||
std::unique_ptr<Primary> Allocator(new Primary);
|
||||
Allocator->init(/*ReleaseToOsInterval=*/-1);
|
||||
std::mutex Mutex;
|
||||
|
@ -335,6 +367,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
|
|||
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
|
||||
scudo::ScopedString Str;
|
||||
Allocator->getStats(&Str);
|
||||
Allocator->getFragmentationInfo(&Str);
|
||||
Str.output();
|
||||
}
|
||||
|
||||
|
|
42  Telegram/ThirdParty/scudo/tests/release_test.cpp  vendored
|
@ -23,17 +23,16 @@ TEST(ScudoReleaseTest, RegionPageMap) {
|
|||
// Various valid counter's max values packed into one word.
|
||||
scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
|
||||
ASSERT_TRUE(PageMap2N.isAllocated());
|
||||
EXPECT_EQ(sizeof(scudo::uptr), PageMap2N.getBufferSize());
|
||||
EXPECT_EQ(1U, PageMap2N.getBufferNumElements());
|
||||
// Check the "all bit set" values too.
|
||||
scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
|
||||
ASSERT_TRUE(PageMap2N1_1.isAllocated());
|
||||
EXPECT_EQ(sizeof(scudo::uptr), PageMap2N1_1.getBufferSize());
|
||||
EXPECT_EQ(1U, PageMap2N1_1.getBufferNumElements());
|
||||
// Verify the packing ratio, the counter is Expected to be packed into the
|
||||
// closest power of 2 bits.
|
||||
scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
|
||||
ASSERT_TRUE(PageMap.isAllocated());
|
||||
EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpPowerOfTwo(I + 1),
|
||||
PageMap.getBufferSize());
|
||||
EXPECT_EQ(scudo::roundUpPowerOfTwo(I + 1), PageMap.getBufferNumElements());
|
||||
}
|
||||
|
||||
// Go through 1, 2, 4, 8, .. {32,64} bits per counter.
|
||||
|
@ -533,7 +532,8 @@ template <class SizeClassMap> void testReleasePartialRegion() {
|
|||
ReleaseBase);
|
||||
Partial.ensurePageMapAllocated();
|
||||
|
||||
EXPECT_GE(Full.PageMap.getBufferSize(), Partial.PageMap.getBufferSize());
|
||||
EXPECT_GE(Full.PageMap.getBufferNumElements(),
|
||||
Partial.PageMap.getBufferNumElements());
|
||||
}
|
||||
|
||||
while (!FreeList.empty()) {
|
||||
|
@ -552,22 +552,16 @@ TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
|
|||
testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
|
||||
}
|
||||
|
||||
TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSSvelte) {
|
||||
testReleaseFreeMemoryToOS<scudo::SvelteSizeClassMap>();
|
||||
}
|
||||
|
||||
TEST(ScudoReleaseTest, PageMapMarkRange) {
|
||||
testPageMapMarkRange<scudo::DefaultSizeClassMap>();
|
||||
testPageMapMarkRange<scudo::AndroidSizeClassMap>();
|
||||
testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
|
||||
testPageMapMarkRange<scudo::SvelteSizeClassMap>();
|
||||
}
|
||||
|
||||
TEST(ScudoReleaseTest, ReleasePartialRegion) {
|
||||
testReleasePartialRegion<scudo::DefaultSizeClassMap>();
|
||||
testReleasePartialRegion<scudo::AndroidSizeClassMap>();
|
||||
testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
|
||||
testReleasePartialRegion<scudo::SvelteSizeClassMap>();
|
||||
}
|
||||
|
||||
template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
|
||||
|
@ -630,31 +624,31 @@ TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
|
|||
testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
|
||||
testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
|
||||
testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
|
||||
testReleaseRangeWithSingleBlock<scudo::SvelteSizeClassMap>();
|
||||
}
|
||||
|
||||
TEST(ScudoReleaseTest, BufferPool) {
|
||||
constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
|
||||
constexpr scudo::uptr StaticBufferSize = 512U;
|
||||
constexpr scudo::uptr StaticBufferNumElements = 512U;
|
||||
|
||||
// Allocate the buffer pool on the heap because it is quite large (slightly
|
||||
// more than StaticBufferCount * StaticBufferSize * sizeof(uptr)) and it may
|
||||
// not fit in the stack on some platforms.
|
||||
using BufferPool = scudo::BufferPool<StaticBufferCount, StaticBufferSize>;
|
||||
// more than StaticBufferCount * StaticBufferNumElements * sizeof(uptr)) and
|
||||
// it may not fit in the stack on some platforms.
|
||||
using BufferPool =
|
||||
scudo::BufferPool<StaticBufferCount, StaticBufferNumElements>;
|
||||
std::unique_ptr<BufferPool> Pool(new BufferPool());
|
||||
|
||||
std::vector<std::pair<scudo::uptr *, scudo::uptr>> Buffers;
|
||||
std::vector<BufferPool::Buffer> Buffers;
|
||||
for (scudo::uptr I = 0; I < StaticBufferCount; ++I) {
|
||||
scudo::uptr *P = Pool->getBuffer(StaticBufferSize);
|
||||
EXPECT_TRUE(Pool->isStaticBufferTestOnly(P, StaticBufferSize));
|
||||
Buffers.emplace_back(P, StaticBufferSize);
|
||||
BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
|
||||
EXPECT_TRUE(Pool->isStaticBufferTestOnly(Buffer));
|
||||
Buffers.push_back(Buffer);
|
||||
}
|
||||
|
||||
// The static buffer is supposed to be used up.
|
||||
scudo::uptr *P = Pool->getBuffer(StaticBufferSize);
|
||||
EXPECT_FALSE(Pool->isStaticBufferTestOnly(P, StaticBufferSize));
|
||||
BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
|
||||
EXPECT_FALSE(Pool->isStaticBufferTestOnly(Buffer));
|
||||
|
||||
Pool->releaseBuffer(P, StaticBufferSize);
|
||||
Pool->releaseBuffer(Buffer);
|
||||
for (auto &Buffer : Buffers)
|
||||
Pool->releaseBuffer(Buffer.first, Buffer.second);
|
||||
Pool->releaseBuffer(Buffer);
|
||||
}
|
||||
|
|
|
@ -23,7 +23,6 @@ TEST(ScudoReportDeathTest, Generic) {
|
|||
EXPECT_DEATH(scudo::reportError("TEST123"), "Scudo ERROR.*TEST123");
|
||||
EXPECT_DEATH(scudo::reportInvalidFlag("ABC", "DEF"), "Scudo ERROR.*ABC.*DEF");
|
||||
EXPECT_DEATH(scudo::reportHeaderCorruption(P), "Scudo ERROR.*42424242");
|
||||
EXPECT_DEATH(scudo::reportHeaderRace(P), "Scudo ERROR.*42424242");
|
||||
EXPECT_DEATH(scudo::reportSanityCheckError("XYZ"), "Scudo ERROR.*XYZ");
|
||||
EXPECT_DEATH(scudo::reportAlignmentTooBig(123, 456), "Scudo ERROR.*123.*456");
|
||||
EXPECT_DEATH(scudo::reportAllocationSizeTooBig(123, 456, 789),
|
||||
|
|
114  Telegram/ThirdParty/scudo/tests/scudo_hooks_test.cpp  vendored
|
@ -1,114 +0,0 @@
|
|||
//===-- scudo_hooks_test.cpp ------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "tests/scudo_unit_test.h"
|
||||
|
||||
#include "allocator_config.h"
|
||||
#include "combined.h"
|
||||
|
||||
namespace {
|
||||
void *LastAllocatedPtr = nullptr;
|
||||
size_t LastRequestSize = 0;
|
||||
void *LastDeallocatedPtr = nullptr;
|
||||
} // namespace
|
||||
|
||||
// Scudo defines weak symbols that can be defined by a client binary
|
||||
// to register callbacks at key points in the allocation timeline. In
|
||||
// order to enforce those invariants, we provide definitions that
|
||||
// update some global state every time they are called, so that tests
|
||||
// can inspect their effects. An unfortunate side effect of this
|
||||
// setup is that because those symbols are part of the binary, they
|
||||
// can't be selectively enabled; that means that they will get called
|
||||
// on unrelated tests in the same compilation unit. To mitigate this
|
||||
// issue, we insulate those tests in a separate compilation unit.
|
||||
extern "C" {
|
||||
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
|
||||
size_t Size) {
|
||||
LastAllocatedPtr = Ptr;
|
||||
LastRequestSize = Size;
|
||||
}
|
||||
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
|
||||
LastDeallocatedPtr = Ptr;
|
||||
}
|
||||
}
|
||||
|
||||
// Simple check that allocation callbacks, when registered, are called:
|
||||
// 1) __scudo_allocate_hook is called when allocating.
|
||||
// 2) __scudo_deallocate_hook is called when deallocating.
|
||||
// 3) Both hooks are called when reallocating.
|
||||
// 4) Neither are called for a no-op reallocation.
|
||||
TEST(ScudoHooksTest, AllocateHooks) {
|
||||
scudo::Allocator<scudo::DefaultConfig> Allocator;
|
||||
constexpr scudo::uptr DefaultSize = 16U;
|
||||
constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
|
||||
|
||||
// Simple allocation and deallocation.
|
||||
{
|
||||
LastAllocatedPtr = nullptr;
|
||||
LastRequestSize = 0;
|
||||
|
||||
void *Ptr = Allocator.allocate(DefaultSize, Origin);
|
||||
|
||||
EXPECT_EQ(Ptr, LastAllocatedPtr);
|
||||
EXPECT_EQ(DefaultSize, LastRequestSize);
|
||||
|
||||
LastDeallocatedPtr = nullptr;
|
||||
|
||||
Allocator.deallocate(Ptr, Origin);
|
||||
|
||||
EXPECT_EQ(Ptr, LastDeallocatedPtr);
|
||||
}
|
||||
|
||||
// Simple no-op, same size reallocation.
|
||||
{
|
||||
void *Ptr = Allocator.allocate(DefaultSize, Origin);
|
||||
|
||||
LastAllocatedPtr = nullptr;
|
||||
LastRequestSize = 0;
|
||||
LastDeallocatedPtr = nullptr;
|
||||
|
||||
void *NewPtr = Allocator.reallocate(Ptr, DefaultSize);
|
||||
|
||||
EXPECT_EQ(Ptr, NewPtr);
|
||||
EXPECT_EQ(nullptr, LastAllocatedPtr);
|
||||
EXPECT_EQ(0U, LastRequestSize);
|
||||
EXPECT_EQ(nullptr, LastDeallocatedPtr);
|
||||
}
|
||||
|
||||
// Reallocation in increasing size classes. This ensures that at
|
||||
// least one of the reallocations will be meaningful.
|
||||
{
|
||||
void *Ptr = Allocator.allocate(0, Origin);
|
||||
|
||||
for (scudo::uptr ClassId = 1U;
|
||||
ClassId <= scudo::DefaultConfig::Primary::SizeClassMap::LargestClassId;
|
||||
++ClassId) {
|
||||
const scudo::uptr Size =
|
||||
scudo::DefaultConfig::Primary::SizeClassMap::getSizeByClassId(
|
||||
ClassId);
|
||||
|
||||
LastAllocatedPtr = nullptr;
|
||||
LastRequestSize = 0;
|
||||
LastDeallocatedPtr = nullptr;
|
||||
|
||||
void *NewPtr = Allocator.reallocate(Ptr, Size);
|
||||
|
||||
if (NewPtr != Ptr) {
|
||||
EXPECT_EQ(NewPtr, LastAllocatedPtr);
|
||||
EXPECT_EQ(Size, LastRequestSize);
|
||||
EXPECT_EQ(Ptr, LastDeallocatedPtr);
|
||||
} else {
|
||||
EXPECT_EQ(nullptr, LastAllocatedPtr);
|
||||
EXPECT_EQ(0U, LastRequestSize);
|
||||
EXPECT_EQ(nullptr, LastDeallocatedPtr);
|
||||
}
|
||||
|
||||
Ptr = NewPtr;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -45,4 +45,10 @@ using Test = ::testing::Test;
|
|||
#define SKIP_NO_DEBUG(T) DISABLED_##T
|
||||
#endif
|
||||
|
||||
#if SCUDO_FUCHSIA
|
||||
// The zxtest library provides a default main function that does the same thing
|
||||
// for Fuchsia builds.
|
||||
#define SCUDO_NO_TEST_MAIN
|
||||
#endif
|
||||
|
||||
extern bool UseQuarantine;
|
||||
|
|
|
@ -45,9 +45,7 @@ __scudo_default_options() {
|
|||
"dealloc_type_mismatch=" DEALLOC_TYPE_MISMATCH;
|
||||
}
|
||||
|
||||
// The zxtest library provides a default main function that does the same thing
|
||||
// for Fuchsia builds.
|
||||
#if !SCUDO_FUCHSIA
|
||||
#if !defined(SCUDO_NO_TEST_MAIN)
|
||||
int main(int argc, char **argv) {
|
||||
EnableMemoryTaggingIfSupported();
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
|
|
|
@ -20,10 +20,6 @@ TEST(ScudoSizeClassMapTest, DefaultSizeClassMap) {
|
|||
testSizeClassMap<scudo::DefaultSizeClassMap>();
|
||||
}
|
||||
|
||||
TEST(ScudoSizeClassMapTest, SvelteSizeClassMap) {
|
||||
testSizeClassMap<scudo::SvelteSizeClassMap>();
|
||||
}
|
||||
|
||||
TEST(ScudoSizeClassMapTest, AndroidSizeClassMap) {
|
||||
testSizeClassMap<scudo::AndroidSizeClassMap>();
|
||||
}
|
||||
|
|
4  Telegram/ThirdParty/scudo/tests/tsd_test.cpp  vendored
|
@ -101,6 +101,7 @@ template <class AllocatorT> static void testRegistry() {
|
|||
|
||||
bool UnlockRequired;
|
||||
auto TSD = Registry->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
EXPECT_NE(TSD, nullptr);
|
||||
EXPECT_EQ(TSD->getCache().Canary, 0U);
|
||||
if (UnlockRequired)
|
||||
|
@ -108,6 +109,7 @@ template <class AllocatorT> static void testRegistry() {
|
|||
|
||||
Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
|
||||
TSD = Registry->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
EXPECT_NE(TSD, nullptr);
|
||||
EXPECT_EQ(TSD->getCache().Canary, 0U);
|
||||
memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
|
||||
|
@ -137,6 +139,7 @@ template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
|
|||
Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
|
||||
bool UnlockRequired;
|
||||
auto TSD = Registry->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
EXPECT_NE(TSD, nullptr);
|
||||
// For an exclusive TSD, the cache should be empty. We cannot guarantee the
|
||||
// same for a shared TSD.
|
||||
|
@ -195,6 +198,7 @@ static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
|
|||
bool UnlockRequired;
|
||||
for (scudo::uptr I = 0; I < 4096U; I++) {
|
||||
auto TSD = Registry->getTSDAndLock(&UnlockRequired);
|
||||
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
|
||||
EXPECT_NE(TSD, nullptr);
|
||||
Set.insert(reinterpret_cast<void *>(TSD));
|
||||
if (UnlockRequired)
|
||||
|
|
201  Telegram/ThirdParty/scudo/tests/wrappers_c_test.cpp  vendored
|
@ -45,7 +45,101 @@ int malloc_iterate(uintptr_t base, size_t size,
|
|||
void *arg);
|
||||
void *valloc(size_t size);
|
||||
void *pvalloc(size_t size);
|
||||
|
||||
#ifndef SCUDO_ENABLE_HOOKS_TESTS
|
||||
#define SCUDO_ENABLE_HOOKS_TESTS 0
|
||||
#endif
|
||||
|
||||
#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
|
||||
#error "Hooks tests should have hooks enabled as well!"
|
||||
#endif
|
||||
|
||||
struct AllocContext {
|
||||
void *Ptr;
|
||||
size_t Size;
|
||||
};
|
||||
struct DeallocContext {
|
||||
void *Ptr;
|
||||
};
|
||||
struct ReallocContext {
|
||||
void *AllocPtr;
|
||||
void *DeallocPtr;
|
||||
size_t Size;
|
||||
};
|
||||
static AllocContext AC;
|
||||
static DeallocContext DC;
|
||||
static ReallocContext RC;
|
||||
|
||||
#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
|
||||
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
|
||||
size_t Size) {
|
||||
AC.Ptr = Ptr;
|
||||
AC.Size = Size;
|
||||
}
|
||||
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
|
||||
DC.Ptr = Ptr;
|
||||
}
|
||||
__attribute__((visibility("default"))) void
|
||||
__scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr, size_t Size) {
|
||||
// Verify that __scudo_realloc_deallocate_hook is called first and set the
|
||||
// right pointer.
|
||||
EXPECT_EQ(OldPtr, RC.DeallocPtr);
|
||||
RC.AllocPtr = NewPtr;
|
||||
RC.Size = Size;
|
||||
|
||||
// Note that this is only used for testing. In general, only one pair of hooks
// will be invoked in `realloc`. If __scudo_realloc_*_hook are not defined,
// it'll call the general hooks only. To make the test easier, we call the
// general one here so that either case (whether __scudo_realloc_*_hook are
// defined) will be verified without separating them into different tests.
|
||||
__scudo_allocate_hook(NewPtr, Size);
|
||||
}
|
||||
__attribute__((visibility("default"))) void
|
||||
__scudo_realloc_deallocate_hook(void *Ptr) {
|
||||
RC.DeallocPtr = Ptr;
|
||||
|
||||
// See the comment in the __scudo_realloc_allocate_hook above.
|
||||
__scudo_deallocate_hook(Ptr);
|
||||
}
|
||||
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
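For context on the hook plumbing being tested here: with SCUDO_ENABLE_HOOKS, a client binary can define these weak symbols to observe allocations, and the optional __scudo_realloc_*_hook pair, when present, is what realloc invokes instead of the general hooks (deallocate first, then allocate, as the test asserts). A minimal client-side sketch; only the hook signatures are taken from this diff, the counting logic is made up for illustration:

#include <cstddef>
#include <cstdio>

// Track how many allocations are currently live.
static size_t LiveAllocations = 0;

extern "C" {
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
                                                                   size_t Size) {
  (void)Size;
  if (Ptr)
    ++LiveAllocations;
}
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
  if (Ptr)
    --LiveAllocations;
}
}

// Call from the client at a quiescent point, e.g. at exit.
void reportLiveAllocations() {
  printf("live allocations: %zu\n", LiveAllocations);
}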
|
||||
}
|
||||
|
||||
class ScudoWrappersCTest : public Test {
|
||||
protected:
|
||||
void SetUp() override {
|
||||
if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
|
||||
printf("Hooks are enabled but hooks tests are disabled.\n");
|
||||
}
|
||||
|
||||
void invalidateHookPtrs() {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS) {
|
||||
void *InvalidPtr = reinterpret_cast<void *>(0xdeadbeef);
|
||||
AC.Ptr = InvalidPtr;
|
||||
DC.Ptr = InvalidPtr;
|
||||
RC.AllocPtr = RC.DeallocPtr = InvalidPtr;
|
||||
}
|
||||
}
|
||||
void verifyAllocHookPtr(UNUSED void *Ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS)
|
||||
EXPECT_EQ(Ptr, AC.Ptr);
|
||||
}
|
||||
void verifyAllocHookSize(UNUSED size_t Size) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS)
|
||||
EXPECT_EQ(Size, AC.Size);
|
||||
}
|
||||
void verifyDeallocHookPtr(UNUSED void *Ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS)
|
||||
EXPECT_EQ(Ptr, DC.Ptr);
|
||||
}
|
||||
void verifyReallocHookPtrs(UNUSED void *OldPtr, void *NewPtr, size_t Size) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS) {
|
||||
EXPECT_EQ(OldPtr, RC.DeallocPtr);
|
||||
EXPECT_EQ(NewPtr, RC.AllocPtr);
|
||||
EXPECT_EQ(Size, RC.Size);
|
||||
}
|
||||
}
|
||||
};
|
||||
using ScudoWrappersCDeathTest = ScudoWrappersCTest;
|
||||
|
||||
// Note that every C allocation function in the test binary will be fulfilled
|
||||
// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
|
||||
|
@ -59,11 +153,13 @@ void *pvalloc(size_t size);
|
|||
|
||||
static const size_t Size = 100U;
|
||||
|
||||
TEST(ScudoWrappersCDeathTest, Malloc) {
|
||||
TEST_F(ScudoWrappersCDeathTest, Malloc) {
|
||||
void *P = malloc(Size);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size, malloc_usable_size(P));
|
||||
EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size);
|
||||
|
||||
// An update to this warning in Clang now triggers in this line, but it's ok
|
||||
// because the check is expecting a bad pointer and should fail.
|
||||
|
@ -78,6 +174,7 @@ TEST(ScudoWrappersCDeathTest, Malloc) {
|
|||
#endif
|
||||
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
EXPECT_DEATH(free(P), "");
|
||||
|
||||
P = malloc(0U);
|
||||
|
@ -89,13 +186,16 @@ TEST(ScudoWrappersCDeathTest, Malloc) {
|
|||
EXPECT_EQ(errno, ENOMEM);
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCTest, Calloc) {
|
||||
TEST_F(ScudoWrappersCTest, Calloc) {
|
||||
void *P = calloc(1U, Size);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size, malloc_usable_size(P));
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size);
|
||||
for (size_t I = 0; I < Size; I++)
|
||||
EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
P = calloc(1U, 0U);
|
||||
EXPECT_NE(P, nullptr);
|
||||
|
@ -116,7 +216,7 @@ TEST(ScudoWrappersCTest, Calloc) {
|
|||
EXPECT_EQ(errno, ENOMEM);
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCTest, SmallAlign) {
|
||||
TEST_F(ScudoWrappersCTest, SmallAlign) {
|
||||
// Allocating pointers by the powers of 2 from 1 to 0x10000
|
||||
// Using powers of 2 due to memalign using powers of 2 and test more sizes
|
||||
constexpr size_t MaxSize = 0x10000;
|
||||
|
@ -137,7 +237,7 @@ TEST(ScudoWrappersCTest, SmallAlign) {
|
|||
free(ptr);
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCTest, Memalign) {
|
||||
TEST_F(ScudoWrappersCTest, Memalign) {
|
||||
void *P;
|
||||
for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
|
||||
const size_t Alignment = 1U << I;
|
||||
|
@ -146,14 +246,20 @@ TEST(ScudoWrappersCTest, Memalign) {
|
|||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size, malloc_usable_size(P));
|
||||
EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
P = nullptr;
|
||||
EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size, malloc_usable_size(P));
|
||||
EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
}
|
||||
|
||||
EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
|
||||
|
@ -165,18 +271,24 @@ TEST(ScudoWrappersCTest, Memalign) {
|
|||
for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
|
||||
P = memalign(Alignment, 1024U);
|
||||
EXPECT_NE(P, nullptr);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCTest, AlignedAlloc) {
|
||||
TEST_F(ScudoWrappersCTest, AlignedAlloc) {
|
||||
const size_t Alignment = 4096U;
|
||||
void *P = aligned_alloc(Alignment, Alignment * 4U);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
|
||||
EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Alignment * 4U);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
errno = 0;
|
||||
P = aligned_alloc(Alignment, Size);
|
||||
|
@ -184,33 +296,60 @@ TEST(ScudoWrappersCTest, AlignedAlloc) {
|
|||
EXPECT_EQ(errno, EINVAL);
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCDeathTest, Realloc) {
|
||||
TEST_F(ScudoWrappersCDeathTest, Realloc) {
|
||||
invalidateHookPtrs();
|
||||
// realloc(nullptr, N) is malloc(N)
|
||||
void *P = realloc(nullptr, 0U);
|
||||
void *P = realloc(nullptr, Size);
|
||||
EXPECT_NE(P, nullptr);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
invalidateHookPtrs();
|
||||
P = malloc(Size);
|
||||
EXPECT_NE(P, nullptr);
|
||||
// realloc(P, 0U) is free(P) and returns nullptr
|
||||
EXPECT_EQ(realloc(P, 0U), nullptr);
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
P = malloc(Size);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size, malloc_usable_size(P));
|
||||
memset(P, 0x42, Size);
|
||||
|
||||
invalidateHookPtrs();
|
||||
void *OldP = P;
|
||||
P = realloc(P, Size * 2U);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size * 2U, malloc_usable_size(P));
|
||||
for (size_t I = 0; I < Size; I++)
|
||||
EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
|
||||
if (OldP == P) {
|
||||
verifyDeallocHookPtr(OldP);
|
||||
verifyAllocHookPtr(OldP);
|
||||
} else {
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size * 2U);
|
||||
verifyDeallocHookPtr(OldP);
|
||||
}
|
||||
verifyReallocHookPtrs(OldP, P, Size * 2U);
|
||||
|
||||
invalidateHookPtrs();
|
||||
OldP = P;
|
||||
P = realloc(P, Size / 2U);
|
||||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_LE(Size / 2U, malloc_usable_size(P));
|
||||
for (size_t I = 0; I < Size / 2U; I++)
|
||||
EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
|
||||
if (OldP == P) {
|
||||
verifyDeallocHookPtr(OldP);
|
||||
verifyAllocHookPtr(OldP);
|
||||
} else {
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(Size / 2U);
|
||||
}
|
||||
verifyReallocHookPtrs(OldP, P, Size / 2U);
|
||||
free(P);
|
||||
|
||||
EXPECT_DEATH(P = realloc(P, Size), "");
|
||||
|
@ -244,7 +383,7 @@ TEST(ScudoWrappersCDeathTest, Realloc) {
|
|||
}
|
||||
|
||||
#if !SCUDO_FUCHSIA
|
||||
TEST(ScudoWrappersCTest, MallOpt) {
|
||||
TEST_F(ScudoWrappersCTest, MallOpt) {
|
||||
errno = 0;
|
||||
EXPECT_EQ(mallopt(-1000, 1), 0);
|
||||
// mallopt doesn't set errno.
|
||||
|
@ -265,7 +404,7 @@ TEST(ScudoWrappersCTest, MallOpt) {
|
|||
}
|
||||
#endif
|
||||
|
||||
TEST(ScudoWrappersCTest, OtherAlloc) {
|
||||
TEST_F(ScudoWrappersCTest, OtherAlloc) {
|
||||
#if HAVE_PVALLOC
|
||||
const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
|
||||
|
||||
|
@ -273,7 +412,11 @@ TEST(ScudoWrappersCTest, OtherAlloc) {
|
|||
EXPECT_NE(P, nullptr);
|
||||
EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
|
||||
EXPECT_LE(PageSize, malloc_usable_size(P));
|
||||
verifyAllocHookPtr(P);
|
||||
// Size will be rounded up to PageSize.
|
||||
verifyAllocHookSize(PageSize);
|
||||
free(P);
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);
|
||||
|
||||
|
@ -288,29 +431,39 @@ TEST(ScudoWrappersCTest, OtherAlloc) {
|
|||
#endif
|
||||
}
|
||||
|
||||
#if !SCUDO_FUCHSIA
|
||||
TEST(ScudoWrappersCTest, MallInfo) {
|
||||
template<typename FieldType>
|
||||
void MallInfoTest() {
|
||||
// mallinfo is deprecated.
|
||||
#pragma clang diagnostic push
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
|
||||
const size_t BypassQuarantineSize = 1024U;
|
||||
const FieldType BypassQuarantineSize = 1024U;
|
||||
struct mallinfo MI = mallinfo();
|
||||
size_t Allocated = MI.uordblks;
|
||||
FieldType Allocated = MI.uordblks;
|
||||
void *P = malloc(BypassQuarantineSize);
|
||||
EXPECT_NE(P, nullptr);
|
||||
MI = mallinfo();
|
||||
EXPECT_GE(static_cast<size_t>(MI.uordblks), Allocated + BypassQuarantineSize);
|
||||
EXPECT_GT(static_cast<size_t>(MI.hblkhd), 0U);
|
||||
size_t Free = MI.fordblks;
|
||||
EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
|
||||
EXPECT_GT(MI.hblkhd, static_cast<FieldType>(0));
|
||||
FieldType Free = MI.fordblks;
|
||||
free(P);
|
||||
MI = mallinfo();
|
||||
EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
|
||||
EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
|
||||
#pragma clang diagnostic pop
|
||||
}
|
||||
|
||||
#if !SCUDO_FUCHSIA
|
||||
TEST_F(ScudoWrappersCTest, MallInfo) {
|
||||
#if SCUDO_ANDROID
|
||||
// Android accidentally set the fields to size_t instead of int.
|
||||
MallInfoTest<size_t>();
|
||||
#else
|
||||
MallInfoTest<int>();
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#if __GLIBC_PREREQ(2, 33)
|
||||
TEST(ScudoWrappersCTest, MallInfo2) {
|
||||
#if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
|
||||
TEST_F(ScudoWrappersCTest, MallInfo2) {
|
||||
const size_t BypassQuarantineSize = 1024U;
|
||||
struct mallinfo2 MI = mallinfo2();
|
||||
size_t Allocated = MI.uordblks;
|
||||
|
@ -342,7 +495,7 @@ static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
|
|||
// To achieve this, we allocate a chunk for which the backing block will be
|
||||
// aligned on a page, then run the malloc_iterate on both the pages that the
|
||||
// block is a boundary for. It must only be seen once by the callback function.
|
||||
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
|
||||
TEST_F(ScudoWrappersCTest, MallocIterateBoundary) {
|
||||
const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
|
||||
#if SCUDO_ANDROID
|
||||
// Android uses a 16 byte alignment for both 32 bit and 64 bit.
|
||||
|
@ -394,7 +547,7 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) {
|
|||
|
||||
// Fuchsia doesn't have alarm, fork or malloc_info.
|
||||
#if !SCUDO_FUCHSIA
|
||||
TEST(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
|
||||
TEST_F(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
|
||||
// We expect heap operations within a disable/enable scope to deadlock.
|
||||
EXPECT_DEATH(
|
||||
{
|
||||
|
@ -409,7 +562,7 @@ TEST(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
|
|||
"");
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCTest, MallocInfo) {
|
||||
TEST_F(ScudoWrappersCTest, MallocInfo) {
|
||||
// Use volatile so that the allocations don't get optimized away.
|
||||
void *volatile P1 = malloc(1234);
|
||||
void *volatile P2 = malloc(4321);
|
||||
|
@ -429,7 +582,7 @@ TEST(ScudoWrappersCTest, MallocInfo) {
|
|||
free(P2);
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCDeathTest, Fork) {
|
||||
TEST_F(ScudoWrappersCDeathTest, Fork) {
|
||||
void *P;
|
||||
pid_t Pid = fork();
|
||||
EXPECT_GE(Pid, 0) << strerror(errno);
|
||||
|
@ -481,7 +634,7 @@ static void *enableMalloc(UNUSED void *Unused) {
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCTest, DisableForkEnable) {
|
||||
TEST_F(ScudoWrappersCTest, DisableForkEnable) {
|
||||
pthread_t ThreadId;
|
||||
Ready = false;
|
||||
EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);
|
||||
|
|
|
@ -27,45 +27,109 @@
|
|||
void operator delete(void *, size_t) noexcept;
|
||||
void operator delete[](void *, size_t) noexcept;
|
||||
|
||||
// Note that every Cxx allocation function in the test binary will be fulfilled
|
||||
// by Scudo. See the comment in the C counterpart of this file.
|
||||
extern "C" {
|
||||
#ifndef SCUDO_ENABLE_HOOKS_TESTS
|
||||
#define SCUDO_ENABLE_HOOKS_TESTS 0
|
||||
#endif
|
||||
|
||||
template <typename T> static void testCxxNew() {
|
||||
T *P = new T;
|
||||
EXPECT_NE(P, nullptr);
|
||||
memset(P, 0x42, sizeof(T));
|
||||
EXPECT_DEATH(delete[] P, "");
|
||||
delete P;
|
||||
EXPECT_DEATH(delete P, "");
|
||||
#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
|
||||
#error "Hooks tests should have hooks enabled as well!"
|
||||
#endif
|
||||
|
||||
P = new T;
|
||||
EXPECT_NE(P, nullptr);
|
||||
memset(P, 0x42, sizeof(T));
|
||||
operator delete(P, sizeof(T));
|
||||
struct AllocContext {
|
||||
void *Ptr;
|
||||
size_t Size;
|
||||
};
|
||||
struct DeallocContext {
|
||||
void *Ptr;
|
||||
};
|
||||
static AllocContext AC;
|
||||
static DeallocContext DC;
|
||||
|
||||
P = new (std::nothrow) T;
|
||||
EXPECT_NE(P, nullptr);
|
||||
memset(P, 0x42, sizeof(T));
|
||||
delete P;
|
||||
|
||||
const size_t N = 16U;
|
||||
T *A = new T[N];
|
||||
EXPECT_NE(A, nullptr);
|
||||
memset(A, 0x42, sizeof(T) * N);
|
||||
EXPECT_DEATH(delete A, "");
|
||||
delete[] A;
|
||||
EXPECT_DEATH(delete[] A, "");
|
||||
|
||||
A = new T[N];
|
||||
EXPECT_NE(A, nullptr);
|
||||
memset(A, 0x42, sizeof(T) * N);
|
||||
operator delete[](A, sizeof(T) * N);
|
||||
|
||||
A = new (std::nothrow) T[N];
|
||||
EXPECT_NE(A, nullptr);
|
||||
memset(A, 0x42, sizeof(T) * N);
|
||||
delete[] A;
|
||||
#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
|
||||
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
|
||||
size_t Size) {
|
||||
AC.Ptr = Ptr;
|
||||
AC.Size = Size;
|
||||
}
|
||||
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
|
||||
DC.Ptr = Ptr;
|
||||
}
|
||||
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
|
||||
}
|
||||
|
||||
class ScudoWrappersCppTest : public Test {
|
||||
protected:
|
||||
void SetUp() override {
|
||||
if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
|
||||
printf("Hooks are enabled but hooks tests are disabled.\n");
|
||||
}
|
||||
|
||||
void verifyAllocHookPtr(UNUSED void *Ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS)
|
||||
EXPECT_EQ(Ptr, AC.Ptr);
|
||||
}
|
||||
void verifyAllocHookSize(UNUSED size_t Size) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS)
|
||||
EXPECT_EQ(Size, AC.Size);
|
||||
}
|
||||
void verifyDeallocHookPtr(UNUSED void *Ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS_TESTS)
|
||||
EXPECT_EQ(Ptr, DC.Ptr);
|
||||
}
|
||||
|
||||
template <typename T> void testCxxNew() {
|
||||
T *P = new T;
|
||||
EXPECT_NE(P, nullptr);
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(sizeof(T));
|
||||
memset(P, 0x42, sizeof(T));
|
||||
EXPECT_DEATH(delete[] P, "");
|
||||
delete P;
|
||||
verifyDeallocHookPtr(P);
|
||||
EXPECT_DEATH(delete P, "");
|
||||
|
||||
P = new T;
|
||||
EXPECT_NE(P, nullptr);
|
||||
memset(P, 0x42, sizeof(T));
|
||||
operator delete(P, sizeof(T));
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
P = new (std::nothrow) T;
|
||||
verifyAllocHookPtr(P);
|
||||
verifyAllocHookSize(sizeof(T));
|
||||
EXPECT_NE(P, nullptr);
|
||||
memset(P, 0x42, sizeof(T));
|
||||
delete P;
|
||||
verifyDeallocHookPtr(P);
|
||||
|
||||
const size_t N = 16U;
|
||||
T *A = new T[N];
|
||||
EXPECT_NE(A, nullptr);
|
||||
verifyAllocHookPtr(A);
|
||||
verifyAllocHookSize(sizeof(T) * N);
|
||||
memset(A, 0x42, sizeof(T) * N);
|
||||
EXPECT_DEATH(delete A, "");
|
||||
delete[] A;
|
||||
verifyDeallocHookPtr(A);
|
||||
EXPECT_DEATH(delete[] A, "");
|
||||
|
||||
A = new T[N];
|
||||
EXPECT_NE(A, nullptr);
|
||||
memset(A, 0x42, sizeof(T) * N);
|
||||
operator delete[](A, sizeof(T) * N);
|
||||
verifyDeallocHookPtr(A);
|
||||
|
||||
A = new (std::nothrow) T[N];
|
||||
verifyAllocHookPtr(A);
|
||||
verifyAllocHookSize(sizeof(T) * N);
|
||||
EXPECT_NE(A, nullptr);
|
||||
memset(A, 0x42, sizeof(T) * N);
|
||||
delete[] A;
|
||||
verifyDeallocHookPtr(A);
|
||||
}
|
||||
};
|
||||
using ScudoWrappersCppDeathTest = ScudoWrappersCppTest;
|
||||
|
||||
class Pixel {
|
||||
public:
|
||||
|
@ -75,7 +139,10 @@ public:
|
|||
Color C = Color::Red;
|
||||
};
|
||||
|
||||
TEST(ScudoWrappersCppDeathTest, New) {
|
||||
// Note that every Cxx allocation function in the test binary will be fulfilled
|
||||
// by Scudo. See the comment in the C counterpart of this file.
|
||||
|
||||
TEST_F(ScudoWrappersCppDeathTest, New) {
|
||||
if (getenv("SKIP_TYPE_MISMATCH") || SKIP_MISMATCH_TESTS) {
|
||||
printf("Skipped type mismatch tests.\n");
|
||||
return;
|
||||
|
@ -116,7 +183,7 @@ static void stressNew() {
|
|||
}
|
||||
}
|
||||
|
||||
TEST(ScudoWrappersCppTest, ThreadedNew) {
|
||||
TEST_F(ScudoWrappersCppTest, ThreadedNew) {
|
||||
// TODO: Investigate why libc sometimes crashes with tag mismatch in
|
||||
// __pthread_clockjoin_ex.
|
||||
std::unique_ptr<scudo::ScopedDisableMemoryTagChecks> NoTags;
|
||||
|
@ -138,7 +205,7 @@ TEST(ScudoWrappersCppTest, ThreadedNew) {
|
|||
}
|
||||
|
||||
#if !SCUDO_FUCHSIA
|
||||
TEST(ScudoWrappersCppTest, AllocAfterFork) {
|
||||
TEST_F(ScudoWrappersCppTest, AllocAfterFork) {
|
||||
// This test can fail flakily when run as part of a large number of
|
||||
// other tests if the maximum number of mappings allowed is low.
|
||||
// We tried to reduce the number of iterations of the loops with
|
||||
|
|
6
Telegram/ThirdParty/scudo/trusty.cpp
vendored
|
@ -12,6 +12,7 @@
|
|||
|
||||
#include "common.h"
|
||||
#include "mutex.h"
|
||||
#include "report_linux.h"
|
||||
#include "trusty.h"
|
||||
|
||||
#include <errno.h> // for errno
|
||||
|
@ -50,7 +51,8 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
|
|||
|
||||
if (IS_ERR(P)) {
|
||||
errno = lk_err_to_errno(PTR_ERR(P));
|
||||
dieOnMapUnmapError(Size);
|
||||
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
|
||||
reportMapError(Size);
|
||||
return nullptr;
|
||||
}
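With this change, an out-of-memory failure is only fatal when the caller did not pass MAP_ALLOWNOMEM. A caller-side sketch (the region name and size are arbitrary; MAP_ALLOWNOMEM comes from common.h):

// Sketch only: opting in to MAP_ALLOWNOMEM means map() returns nullptr on
// ENOMEM instead of reporting a fatal map error, so the failure must be handled.
scudo::uptr Size = 64 * 1024;
void *Region =
    scudo::map(/*Addr=*/nullptr, Size, "scudo:example", MAP_ALLOWNOMEM);
if (Region == nullptr) {
  // Out of memory: shrink the request or propagate the failure to the caller.
}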
|
||||
|
||||
|
@ -60,7 +62,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
|
|||
void unmap(UNUSED void *Addr, UNUSED uptr Size, UNUSED uptr Flags,
|
||||
UNUSED MapPlatformData *Data) {
|
||||
if (_trusty_munmap(Addr, Size) != 0)
|
||||
dieOnMapUnmapError();
|
||||
reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
|
||||
}
|
||||
|
||||
void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
|
||||
|
|
17
Telegram/ThirdParty/scudo/tsd.h
vendored
|
@ -53,8 +53,14 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
|
|||
inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
|
||||
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
|
||||
|
||||
void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
|
||||
Instance->commitBack(this);
|
||||
void commitBack(Allocator *Instance) { Instance->commitBack(this); }
|
||||
|
||||
// As noted in the comments attached to `getCache()`, the TSD doesn't always need to be
|
||||
// locked. In that case, we would only skip the check before we have all TSDs
|
||||
// locked in all paths.
|
||||
void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
|
||||
if (SCUDO_DEBUG && !BypassCheck)
|
||||
Mutex.assertHeld();
|
||||
}
|
||||
|
||||
// Ideally, we may want to assert that all the operations on
|
||||
|
@ -66,11 +72,8 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
|
|||
// TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
|
||||
// TSD doesn't always require holding the lock. Add this assertion while the
|
||||
// lock is always acquired.
|
||||
typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
|
||||
return Cache;
|
||||
}
|
||||
typename Allocator::QuarantineCacheT &getQuarantineCache()
|
||||
ASSERT_CAPABILITY(Mutex) {
|
||||
typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
|
||||
typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
|
||||
return QuarantineCache;
|
||||
}
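A minimal illustration, not scudo code, of the distinction between the two annotations used above (macros from thread_annotations.h): REQUIRES(M) makes the analyzer demand that callers already hold M, whereas ASSERT_CAPABILITY(M) marks a function that establishes the fact itself, as assertLocked() does via Mutex.assertHeld().

// Sketch only: a hypothetical guarded counter built on scudo::HybridMutex.
struct GuardedCounter {
  scudo::HybridMutex M;
  scudo::uptr Value GUARDED_BY(M) = 0;

  // The analyzer rejects callers that do not hold M when calling this.
  scudo::uptr get() REQUIRES(M) { return Value; }

  // Tells the analyzer M is held from here on, backed by a runtime check.
  void checkHeld() ASSERT_CAPABILITY(M) { M.assertHeld(); }
};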
|
||||
|
||||
|
|
5
Telegram/ThirdParty/scudo/tsd_shared.h
vendored
|
@ -120,6 +120,11 @@ struct TSDRegistrySharedT {
|
|||
TSDsArraySize);
|
||||
for (uptr I = 0; I < NumberOfTSDs; ++I) {
|
||||
TSDs[I].lock();
|
||||
// Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
|
||||
// thread annotations. However, given the TSD is only locked on shared
|
||||
// path, do the assertion in a separate path to avoid confusing the
|
||||
// analyzer.
|
||||
TSDs[I].assertLocked(/*BypassCheck=*/true);
|
||||
Str->append(" Shared TSD[%zu]:\n", I);
|
||||
TSDs[I].getCache().getStats(Str);
|
||||
TSDs[I].unlock();
|
||||
|
|
51
Telegram/ThirdParty/scudo/vector.h
vendored
|
@ -9,26 +9,20 @@
|
|||
#ifndef SCUDO_VECTOR_H_
|
||||
#define SCUDO_VECTOR_H_
|
||||
|
||||
#include "common.h"
|
||||
#include "mem_map.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
namespace scudo {
|
||||
|
||||
// A low-level vector based on map. May incur a significant memory overhead for
|
||||
// small vectors. The current implementation supports only POD types.
|
||||
// A low-level vector based on map. It stores the contents inline up to a fixed
|
||||
// capacity, or in an external memory buffer if it grows bigger than that. May
|
||||
// incur a significant memory overhead for small vectors. The current
|
||||
// implementation supports only POD types.
|
||||
//
|
||||
// NOTE: This class is not meant to be used directly, use Vector<T> instead.
|
||||
template <typename T> class VectorNoCtor {
|
||||
public:
|
||||
constexpr void init(uptr InitialCapacity = 0) {
|
||||
Data = &LocalData[0];
|
||||
CapacityBytes = sizeof(LocalData);
|
||||
if (InitialCapacity > capacity())
|
||||
reserve(InitialCapacity);
|
||||
}
|
||||
void destroy() {
|
||||
if (Data != &LocalData[0])
|
||||
unmap(Data, CapacityBytes, 0, &MapData);
|
||||
}
|
||||
T &operator[](uptr I) {
|
||||
DCHECK_LT(I, Size);
|
||||
return Data[I];
|
||||
|
@ -78,24 +72,43 @@ public:
|
|||
const T *end() const { return data() + size(); }
|
||||
T *end() { return data() + size(); }
|
||||
|
||||
protected:
|
||||
constexpr void init(uptr InitialCapacity = 0) {
|
||||
Data = &LocalData[0];
|
||||
CapacityBytes = sizeof(LocalData);
|
||||
if (InitialCapacity > capacity())
|
||||
reserve(InitialCapacity);
|
||||
}
|
||||
void destroy() {
|
||||
if (Data != &LocalData[0])
|
||||
ExternalBuffer.unmap(ExternalBuffer.getBase(),
|
||||
ExternalBuffer.getCapacity());
|
||||
}
|
||||
|
||||
private:
|
||||
void reallocate(uptr NewCapacity) {
|
||||
DCHECK_GT(NewCapacity, 0);
|
||||
DCHECK_LE(Size, NewCapacity);
|
||||
|
||||
MemMapT NewExternalBuffer;
|
||||
NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
|
||||
T *NewData = reinterpret_cast<T *>(
|
||||
map(nullptr, NewCapacity, "scudo:vector", 0, &MapData));
|
||||
memcpy(NewData, Data, Size * sizeof(T));
|
||||
NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector");
|
||||
T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());
|
||||
|
||||
memcpy(NewExternalData, Data, Size * sizeof(T));
|
||||
destroy();
|
||||
Data = NewData;
|
||||
|
||||
Data = NewExternalData;
|
||||
CapacityBytes = NewCapacity;
|
||||
ExternalBuffer = NewExternalBuffer;
|
||||
}
|
||||
|
||||
T *Data = nullptr;
|
||||
T LocalData[256 / sizeof(T)] = {};
|
||||
uptr CapacityBytes = 0;
|
||||
uptr Size = 0;
|
||||
[[no_unique_address]] MapPlatformData MapData = {};
|
||||
|
||||
T LocalData[256 / sizeof(T)] = {};
|
||||
MemMapT ExternalBuffer;
|
||||
};
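As a usage sketch of the inline-then-external storage described in the class comment (assuming the public push_back()/data() API):

// Sketch only: the vector starts in the 256-byte LocalData array; the first
// growth past that capacity calls reallocate(), which maps an external
// "scudo:vector" buffer and copies the elements into it.
scudo::Vector<scudo::u32> V;
for (scudo::u32 I = 0; I < 128; I++) // 128 * 4 bytes > 256, forcing the switch
  V.push_back(I);
// V.data() now points into the external MemMapT buffer; destroy() unmaps it.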
|
||||
|
||||
template <typename T> class Vector : public VectorNoCtor<T> {
|
||||
|
|
3
Telegram/ThirdParty/scudo/wrappers_c.cpp
vendored
|
@ -12,6 +12,9 @@
|
|||
#if !SCUDO_ANDROID || !_BIONIC
|
||||
|
||||
#include "allocator_config.h"
|
||||
#include "internal_defs.h"
|
||||
#include "platform.h"
|
||||
#include "scudo/interface.h"
|
||||
#include "wrappers_c.h"
|
||||
#include "wrappers_c_checks.h"
|
||||
|
||||
|
|
108
Telegram/ThirdParty/scudo/wrappers_c.inc
vendored
|
@ -17,6 +17,35 @@
|
|||
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
|
||||
#endif
|
||||
|
||||
static void reportAllocation(void *ptr, size_t size) {
|
||||
if (SCUDO_ENABLE_HOOKS)
|
||||
if (__scudo_allocate_hook && ptr)
|
||||
__scudo_allocate_hook(ptr, size);
|
||||
}
|
||||
static void reportDeallocation(void *ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS)
|
||||
if (__scudo_deallocate_hook)
|
||||
__scudo_deallocate_hook(ptr);
|
||||
}
|
||||
static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
|
||||
DCHECK_NE(new_ptr, nullptr);
|
||||
|
||||
if (SCUDO_ENABLE_HOOKS) {
|
||||
if (__scudo_realloc_allocate_hook)
|
||||
__scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
|
||||
else if (__scudo_allocate_hook)
|
||||
__scudo_allocate_hook(new_ptr, size);
|
||||
}
|
||||
}
|
||||
static void reportReallocDeallocation(void *old_ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS) {
|
||||
if (__scudo_realloc_deallocate_hook)
|
||||
__scudo_realloc_deallocate_hook(old_ptr);
|
||||
else if (__scudo_deallocate_hook)
|
||||
__scudo_deallocate_hook(old_ptr);
|
||||
}
|
||||
}
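These helpers forward to the weak hook symbols declared in scudo/interface.h. A minimal sketch of what a client binary might provide (the byte counter is hypothetical bookkeeping; a real hook must not re-enter the allocator):

// Sketch only: assumes the weak __scudo_allocate_hook/__scudo_deallocate_hook
// declarations from scudo/interface.h.
#include <atomic>
#include <stddef.h>

static std::atomic<size_t> AllocatedBytes{0};

extern "C" __attribute__((visibility("default"))) void
__scudo_allocate_hook(void *Ptr, size_t Size) {
  if (Ptr)
    AllocatedBytes.fetch_add(Size, std::memory_order_relaxed);
}

extern "C" __attribute__((visibility("default"))) void
__scudo_deallocate_hook(void *Ptr) {
  (void)Ptr; // a real tracker would subtract the size recorded at allocation
}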
|
||||
|
||||
extern "C" {
|
||||
|
||||
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
|
||||
|
@ -28,11 +57,14 @@ INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
|
|||
}
|
||||
scudo::reportCallocOverflow(nmemb, size);
|
||||
}
|
||||
return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
|
||||
Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
|
||||
void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
|
||||
SCUDO_MALLOC_ALIGNMENT, true);
|
||||
reportAllocation(Ptr, Product);
|
||||
return scudo::setErrnoOnNull(Ptr);
|
||||
}
|
||||
|
||||
INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
|
||||
reportDeallocation(ptr);
|
||||
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
|
||||
}
|
||||
|
||||
|
@ -75,8 +107,10 @@ INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
|
|||
#endif
|
||||
|
||||
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
|
||||
return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
|
||||
size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
|
||||
void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
|
||||
SCUDO_MALLOC_ALIGNMENT);
|
||||
reportAllocation(Ptr, size);
|
||||
return scudo::setErrnoOnNull(Ptr);
|
||||
}
|
||||
|
||||
#if SCUDO_ANDROID
|
||||
|
@ -105,8 +139,10 @@ INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
|
|||
scudo::reportAlignmentNotPowerOfTwo(alignment);
|
||||
}
|
||||
}
|
||||
return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
|
||||
alignment);
|
||||
void *Ptr =
|
||||
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
|
||||
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
|
||||
|
@ -120,6 +156,8 @@ INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
|
|||
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
|
||||
if (UNLIKELY(!Ptr))
|
||||
return ENOMEM;
|
||||
reportAllocation(Ptr, size);
|
||||
|
||||
*memptr = Ptr;
|
||||
return 0;
|
||||
}
|
||||
|
@ -134,26 +172,57 @@ INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
|
|||
scudo::reportPvallocOverflow(size);
|
||||
}
|
||||
// pvalloc(0) should allocate one page.
|
||||
return scudo::setErrnoOnNull(
|
||||
void *Ptr =
|
||||
SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
|
||||
scudo::Chunk::Origin::Memalign, PageSize));
|
||||
scudo::Chunk::Origin::Memalign, PageSize);
|
||||
reportAllocation(Ptr, scudo::roundUp(size, PageSize));
|
||||
|
||||
return scudo::setErrnoOnNull(Ptr);
|
||||
}
|
||||
|
||||
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
|
||||
if (!ptr)
|
||||
return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
|
||||
size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
|
||||
if (!ptr) {
|
||||
void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
|
||||
SCUDO_MALLOC_ALIGNMENT);
|
||||
reportAllocation(Ptr, size);
|
||||
return scudo::setErrnoOnNull(Ptr);
|
||||
}
|
||||
if (size == 0) {
|
||||
reportDeallocation(ptr);
|
||||
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
|
||||
return nullptr;
|
||||
}
|
||||
return scudo::setErrnoOnNull(
|
||||
SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
|
||||
|
||||
// Given that the reporting of deallocation and allocation are not atomic, we
|
||||
// always pretend the old pointer will be released so that the user doesn't
|
||||
// need to worry about the false double-use case from the view of hooks.
|
||||
//
|
||||
// For example, assume that `realloc` releases the old pointer and allocates a
|
||||
// new pointer. Before the reporting of both operations has been done, another
|
||||
// thread may get the old pointer from `malloc`. It may be misinterpreted as
|
||||
// double-use if it's not handled properly on the hook side.
|
||||
reportReallocDeallocation(ptr);
|
||||
void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
|
||||
if (NewPtr != nullptr) {
|
||||
// Note that even if NewPtr == ptr, the size has changed. We still need to
|
||||
// report the new size.
|
||||
reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
|
||||
} else {
|
||||
// If `realloc` fails, the old pointer is not released. Report the old
|
||||
// pointer as allocated again.
|
||||
reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
|
||||
SCUDO_ALLOCATOR.getAllocSize(ptr));
|
||||
}
|
||||
|
||||
return scudo::setErrnoOnNull(NewPtr);
|
||||
}
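Because the deallocation of the old pointer is always reported before the outcome is known (see the comment above), a hook-side tracker can simply erase and then re-insert. A minimal sketch, not scudo code; the std::unordered_map here is illustrative only, since a production hook must avoid re-entering the allocator:

// Sketch only: hypothetical client-side tracker for the realloc hook pair.
#include <mutex>
#include <unordered_map>

static std::mutex TrackerMutex;
static std::unordered_map<void *, size_t> LiveAllocations;

extern "C" void __scudo_realloc_deallocate_hook(void *OldPtr) {
  std::lock_guard<std::mutex> L(TrackerMutex);
  LiveAllocations.erase(OldPtr); // old pointer is always reported released first
}

extern "C" void __scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr,
                                              size_t Size) {
  (void)OldPtr;
  std::lock_guard<std::mutex> L(TrackerMutex);
  // Re-register the surviving pointer: NewPtr may equal OldPtr, and on failure
  // the wrappers report OldPtr again with its current allocation size.
  LiveAllocations[NewPtr] = Size;
}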
|
||||
|
||||
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
|
||||
return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
|
||||
size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
|
||||
void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
|
||||
scudo::getPageSizeCached());
|
||||
reportAllocation(Ptr, size);
|
||||
|
||||
return scudo::setErrnoOnNull(Ptr);
|
||||
}
|
||||
|
||||
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
|
||||
|
@ -198,6 +267,7 @@ INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
|
|||
return 1;
|
||||
} else if (param == M_LOG_STATS) {
|
||||
SCUDO_ALLOCATOR.printStats();
|
||||
SCUDO_ALLOCATOR.printFragmentationInfo();
|
||||
return 1;
|
||||
} else {
|
||||
scudo::Option option;
|
||||
|
@ -233,8 +303,12 @@ INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
|
|||
}
|
||||
scudo::reportInvalidAlignedAllocAlignment(alignment, size);
|
||||
}
|
||||
return scudo::setErrnoOnNull(
|
||||
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
|
||||
|
||||
void *Ptr =
|
||||
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
|
||||
reportAllocation(Ptr, size);
|
||||
|
||||
return scudo::setErrnoOnNull(Ptr);
|
||||
}
|
||||
|
||||
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
|
||||
|
|
21
Telegram/ThirdParty/scudo/wrappers_c_bionic.cpp
vendored
|
@ -12,6 +12,9 @@
|
|||
#if SCUDO_ANDROID && _BIONIC
|
||||
|
||||
#include "allocator_config.h"
|
||||
#include "internal_defs.h"
|
||||
#include "platform.h"
|
||||
#include "scudo/interface.h"
|
||||
#include "wrappers_c.h"
|
||||
#include "wrappers_c_checks.h"
|
||||
|
||||
|
@ -24,7 +27,7 @@
|
|||
|
||||
extern "C" void SCUDO_PREFIX(malloc_postinit)();
|
||||
SCUDO_REQUIRE_CONSTANT_INITIALIZATION
|
||||
static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
|
||||
static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
|
||||
SCUDO_ALLOCATOR;
|
||||
|
||||
#include "wrappers_c.inc"
|
||||
|
@ -35,15 +38,15 @@ static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
|
|||
// TODO(kostyak): support both allocators.
|
||||
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
|
||||
|
||||
INTERFACE void
|
||||
__scudo_get_error_info(struct scudo_error_info *error_info,
|
||||
uintptr_t fault_addr, const char *stack_depot,
|
||||
const char *region_info, const char *ring_buffer,
|
||||
const char *memory, const char *memory_tags,
|
||||
uintptr_t memory_addr, size_t memory_size) {
|
||||
INTERFACE void __scudo_get_error_info(
|
||||
struct scudo_error_info *error_info, uintptr_t fault_addr,
|
||||
const char *stack_depot, size_t stack_depot_size, const char *region_info,
|
||||
const char *ring_buffer, size_t ring_buffer_size, const char *memory,
|
||||
const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
|
||||
(void)(stack_depot_size);
|
||||
Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
|
||||
ring_buffer, memory, memory_tags, memory_addr,
|
||||
memory_size);
|
||||
ring_buffer, ring_buffer_size, memory, memory_tags,
|
||||
memory_addr, memory_size);
|
||||
}
|
||||
|
||||
INTERFACE const char *__scudo_get_stack_depot_addr() {
|
||||
|
|
66
Telegram/ThirdParty/scudo/wrappers_cpp.cpp
vendored
|
@ -12,6 +12,9 @@
|
|||
#if !SCUDO_ANDROID || !_BIONIC
|
||||
|
||||
#include "allocator_config.h"
|
||||
#include "internal_defs.h"
|
||||
#include "platform.h"
|
||||
#include "scudo/interface.h"
|
||||
#include "wrappers_c.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
@ -21,86 +24,125 @@ struct nothrow_t {};
|
|||
enum class align_val_t : size_t {};
|
||||
} // namespace std
|
||||
|
||||
static void reportAllocation(void *ptr, size_t size) {
|
||||
if (SCUDO_ENABLE_HOOKS)
|
||||
if (__scudo_allocate_hook && ptr)
|
||||
__scudo_allocate_hook(ptr, size);
|
||||
}
|
||||
static void reportDeallocation(void *ptr) {
|
||||
if (SCUDO_ENABLE_HOOKS)
|
||||
if (__scudo_deallocate_hook)
|
||||
__scudo_deallocate_hook(ptr);
|
||||
}
|
||||
|
||||
INTERFACE WEAK void *operator new(size_t size) {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::New);
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new[](size_t size) {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new(size_t size,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::New);
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new[](size_t size,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::New,
|
||||
static_cast<scudo::uptr>(align));
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
|
||||
static_cast<scudo::uptr>(align));
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
|
||||
static_cast<scudo::uptr>(align));
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
|
||||
static_cast<scudo::uptr>(align));
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::New,
|
||||
static_cast<scudo::uptr>(align));
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
|
||||
static_cast<scudo::uptr>(align));
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
|
||||
static_cast<scudo::uptr>(align));
|
||||
void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
|
||||
static_cast<scudo::uptr>(align));
|
||||
reportAllocation(Ptr, size);
|
||||
return Ptr;
|
||||
}
|
||||
|
||||
INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
|
||||
}
|
||||
INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
|
||||
}
|
||||
INTERFACE WEAK void operator delete(void *ptr,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
|
||||
}
|
||||
INTERFACE WEAK void operator delete[](void *ptr,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
|
||||
}
|
||||
INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
|
||||
}
|
||||
INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
|
||||
}
|
||||
INTERFACE WEAK void operator delete(void *ptr,
|
||||
std::align_val_t align) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
|
||||
static_cast<scudo::uptr>(align));
|
||||
}
|
||||
INTERFACE WEAK void operator delete[](void *ptr,
|
||||
std::align_val_t align) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
|
||||
static_cast<scudo::uptr>(align));
|
||||
}
|
||||
INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
|
||||
static_cast<scudo::uptr>(align));
|
||||
}
|
||||
INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
|
||||
std::nothrow_t const &) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
|
||||
static_cast<scudo::uptr>(align));
|
||||
}
|
||||
INTERFACE WEAK void operator delete(void *ptr, size_t size,
|
||||
std::align_val_t align) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
|
||||
static_cast<scudo::uptr>(align));
|
||||
}
|
||||
INTERFACE WEAK void operator delete[](void *ptr, size_t size,
|
||||
std::align_val_t align) NOEXCEPT {
|
||||
reportDeallocation(ptr);
|
||||
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
|
||||
static_cast<scudo::uptr>(align));
|
||||
}
|
||||
|
|