Revert 7a0da88943, "scudo: Support memory tagging in the secondary allocator."

We measured a 2.5-second (17.5%) regression in Android boot time with this change.
Peter Collingbourne 2021-02-25 16:50:02 -08:00
parent d7fca3f0bf
commit 9678b07e42
12 changed files with 228 additions and 472 deletions
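
For context, the core interface change being reverted can be summarized by the sketch below. It is reconstructed from the diff that follows and is not part of the commit; the struct names are illustrative, and uptr, Options and FillContentsMode are stand-ins for scudo's own definitions.

// Sketch of the secondary (MapAllocator) interface before and after the revert.
#include <cstdint>

using uptr = uintptr_t;
struct Options {};                          // runtime option flags (stand-in)
enum FillContentsMode { NoFill, ZeroFill }; // subset relevant here (stand-in)

// Before the revert: every secondary allocation and deallocation takes the
// runtime Options, so mappings can be created or released with memory
// tagging (MAP_MEMTAG) or made inaccessible (MAP_NOACCESS) as appropriate.
struct MapAllocatorBefore {
  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);
  void deallocate(Options Options, void *Ptr);
};

// After the revert: the pre-tagging interface, taking only an alignment hint.
struct MapAllocatorAfter {
  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);
  void deallocate(void *Ptr);
};

Most of the diff follows from this: the secondary cache goes back to tracking Block/BlockEnd instead of CommitBase/CommitSize, the header-tagging helpers (addHeaderTag, getHeaderTaggedPointer, addFixedTag) revert to untagPointerMaybe, and setMemoryPermission along with the secondary's disableMemoryTagging support code is removed.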

View File

@@ -71,7 +71,6 @@ struct DefaultConfig {
typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
static const u32 SecondaryCacheEntriesArraySize = 32U;
static const u32 SecondaryCacheQuarantineSize = 0U;
static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
@@ -99,7 +98,6 @@ struct AndroidConfig {
typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
static const u32 SecondaryCacheEntriesArraySize = 256U;
static const u32 SecondaryCacheQuarantineSize = 32U;
static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
@@ -128,7 +126,6 @@ struct AndroidSvelteConfig {
typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
static const u32 SecondaryCacheEntriesArraySize = 16U;
static const u32 SecondaryCacheQuarantineSize = 32U;
static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;

View File

@@ -71,10 +71,12 @@ public:
NewHeader.State = Chunk::State::Available;
Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
if (allocatorSupportsMemoryTagging<Params>())
Ptr = untagPointer(Ptr);
void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
Cache.deallocate(NewHeader.ClassId, BlockBegin);
const uptr ClassId = NewHeader.ClassId;
if (LIKELY(ClassId))
Cache.deallocate(ClassId, BlockBegin);
else
Allocator.Secondary.deallocate(BlockBegin);
}
// We take a shortcut when allocating a quarantine batch by working with the
@@ -236,26 +238,11 @@ public:
TSD->Cache.destroy(&Stats);
}
ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
if (!allocatorSupportsMemoryTagging<Params>())
return Ptr;
auto UntaggedPtr = untagPointer(Ptr);
if (UntaggedPtr != Ptr)
return UntaggedPtr;
// Secondary, or pointer allocated while memory tagging is unsupported or
// disabled. The tag mismatch is okay in the latter case because tags will
// not be checked.
return addHeaderTag(Ptr);
}
ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
if (!allocatorSupportsMemoryTagging<Params>())
return Ptr;
return addFixedTag(Ptr, 2);
}
ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
ALWAYS_INLINE void *untagPointerMaybe(void *Ptr) {
if (allocatorSupportsMemoryTagging<Params>())
return reinterpret_cast<void *>(
untagPointer(reinterpret_cast<uptr>(Ptr)));
return Ptr;
}
NOINLINE u32 collectStackTrace() {
@@ -352,7 +339,7 @@ public:
TSD->unlock();
}
if (UNLIKELY(ClassId == 0))
Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
Block = Secondary.allocate(NeededSize, Alignment, &SecondaryBlockEnd,
FillContents);
if (UNLIKELY(!Block)) {
@@ -448,21 +435,12 @@ public:
TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
}
storeAllocationStackMaybe(Options, Ptr);
} else {
Block = addHeaderTag(Block);
Ptr = addHeaderTag(Ptr);
if (UNLIKELY(FillContents != NoFill)) {
// This condition is not necessarily unlikely, but since memset is
// costly, we might as well mark it as such.
memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
PrimaryT::getSizeByClassId(ClassId));
}
} else if (UNLIKELY(FillContents != NoFill)) {
// This condition is not necessarily unlikely, but since memset is
// costly, we might as well mark it as such.
memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
PrimaryT::getSizeByClassId(ClassId));
}
} else {
Block = addHeaderTag(Block);
Ptr = addHeaderTag(Ptr);
if (UNLIKELY(useMemoryTagging<Params>(Options)))
storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
}
Chunk::UnpackedHeader Header = {};
@@ -516,7 +494,7 @@ public:
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
Ptr = getHeaderTaggedPointer(Ptr);
Ptr = untagPointerMaybe(Ptr);
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
@@ -555,7 +533,7 @@ public:
}
void *OldTaggedPtr = OldPtr;
OldPtr = getHeaderTaggedPointer(OldPtr);
OldPtr = untagPointerMaybe(OldPtr);
// The following cases are handled by the C wrappers.
DCHECK_NE(OldPtr, nullptr);
@@ -591,7 +569,7 @@ public:
Chunk::Origin::Malloc);
}
void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
uptr BlockEnd;
uptr OldSize;
const uptr ClassId = OldHeader.ClassId;
@@ -601,19 +579,18 @@ public:
OldSize = OldHeader.SizeOrUnusedBytes;
} else {
BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
OldHeader.SizeOrUnusedBytes);
OldSize = BlockEnd -
(reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
}
// If the new chunk still fits in the previously allocated block (with a
// reasonable delta), we just keep the old block, and update the chunk
// header to reflect the size change.
if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
Chunk::UnpackedHeader NewHeader = OldHeader;
NewHeader.SizeOrUnusedBytes =
(ClassId ? NewSize
: BlockEnd -
(reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
: BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
if (UNLIKELY(ClassId && useMemoryTagging<Params>(Options))) {
@@ -706,30 +683,14 @@ public:
initThreadMaybe();
const uptr From = Base;
const uptr To = Base + Size;
bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
systemSupportsMemoryTagging();
auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
Arg](uptr Block) {
auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
if (Block < From || Block >= To)
return;
uptr Chunk;
Chunk::UnpackedHeader Header;
if (MayHaveTaggedPrimary) {
// A chunk header can either have a zero tag (tagged primary) or the
// header tag (secondary, or untagged primary). We don't know which so
// try both.
ScopedDisableMemoryTagChecks x;
if (!getChunkFromBlock(Block, &Chunk, &Header) &&
!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
return;
} else {
if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
return;
}
if (Header.State == Chunk::State::Allocated) {
if (getChunkFromBlock(Block, &Chunk, &Header) &&
Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
if (allocatorSupportsMemoryTagging<Params>())
TaggedChunk = untagPointer(TaggedChunk);
if (useMemoryTagging<Params>(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
@@ -790,7 +751,7 @@ public:
return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS
Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
Ptr = untagPointerMaybe(const_cast<void *>(Ptr));
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
// Getting the usable size of a chunk only makes sense if it's allocated.
@@ -815,7 +776,7 @@ public:
#endif // GWP_ASAN_HOOKS
if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
return false;
Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
Ptr = untagPointerMaybe(const_cast<void *>(Ptr));
Chunk::UnpackedHeader Header;
return Chunk::isValid(Cookie, Ptr, &Header) &&
Header.State == Chunk::State::Allocated;
@@ -825,17 +786,8 @@ public:
return useMemoryTagging<Params>(Primary.Options.load());
}
void disableMemoryTagging() {
// If we haven't been initialized yet, we need to initialize now in order to
// prevent a future call to initThreadMaybe() from enabling memory tagging
// based on feature detection. But don't call initThreadMaybe() because it
// may end up calling the allocator (via pthread_atfork, via the post-init
// callback), which may cause mappings to be created with memory tagging
// enabled.
TSDRegistry.initOnceMaybe(this);
if (allocatorSupportsMemoryTagging<Params>()) {
Secondary.disableMemoryTagging();
if (allocatorSupportsMemoryTagging<Params>())
Primary.Options.clear(OptionBit::UseMemoryTagging);
}
}
void setTrackAllocationStacks(bool Track) {
@@ -1076,8 +1028,6 @@ private:
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (LIKELY(Header->ClassId))
return SizeOrUnusedBytes;
if (allocatorSupportsMemoryTagging<Params>())
Ptr = untagPointer(const_cast<void *>(Ptr));
return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
@@ -1103,14 +1053,11 @@ private:
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
// than the maximum allowed, we return a chunk directly to the backend.
// This purposefully underflows for Size == 0.
const bool BypassQuarantine = !Quarantine.getCacheSize() ||
((Size - 1) >= QuarantineMaxChunkSize) ||
!NewHeader.ClassId;
const bool BypassQuarantine =
!Quarantine.getCacheSize() || ((Size - 1) >= QuarantineMaxChunkSize);
if (BypassQuarantine) {
NewHeader.State = Chunk::State::Available;
Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
if (allocatorSupportsMemoryTagging<Params>())
Ptr = untagPointer(Ptr);
void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
const uptr ClassId = NewHeader.ClassId;
if (LIKELY(ClassId)) {
@@ -1120,10 +1067,7 @@ private:
if (UnlockRequired)
TSD->unlock();
} else {
if (UNLIKELY(useMemoryTagging<Params>(Options)))
storeTags(reinterpret_cast<uptr>(BlockBegin),
reinterpret_cast<uptr>(Ptr));
Secondary.deallocate(Options, BlockBegin);
Secondary.deallocate(BlockBegin);
}
} else {
NewHeader.State = Chunk::State::Quarantined;

View File

@@ -165,9 +165,6 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
void unmap(void *Addr, uptr Size, uptr Flags = 0,
MapPlatformData *Data = nullptr);
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
MapPlatformData *Data = nullptr);
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data = nullptr);

View File

@@ -134,16 +134,6 @@ void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
}
}
void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
UNUSED MapPlatformData *Data) {
const zx_vm_option_t Prot =
(Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
DCHECK(Data);
DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
if (_zx_vmar_protect(Data->Vmar, Prot, Addr, Size) != ZX_OK)
dieOnMapUnmapError();
}
void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data) {
DCHECK(Data);

View File

@@ -50,14 +50,14 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
MmapProt = PROT_NONE;
} else {
MmapProt = PROT_READ | PROT_WRITE;
}
#if defined(__aarch64__)
#ifndef PROT_MTE
#define PROT_MTE 0x20
#endif
if (Flags & MAP_MEMTAG)
MmapProt |= PROT_MTE;
if (Flags & MAP_MEMTAG)
MmapProt |= PROT_MTE;
#endif
}
if (Addr) {
// Currently no scenario for a noaccess mapping with a fixed address.
DCHECK_EQ(Flags & MAP_NOACCESS, 0);
@@ -70,7 +70,7 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
return nullptr;
}
#if SCUDO_ANDROID
if (Name)
if (!(Flags & MAP_NOACCESS))
prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
#endif
return P;
@@ -82,13 +82,6 @@ void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
dieOnMapUnmapError();
}
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
UNUSED MapPlatformData *Data) {
int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
dieOnMapUnmapError();
}
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
UNUSED MapPlatformData *Data) {
void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);

View File

@@ -23,15 +23,7 @@ void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask, uptr *TaggedBegin,
#if defined(__aarch64__) || defined(SCUDO_FUZZ)
// We assume that Top-Byte Ignore is enabled if the architecture supports memory
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
#if SCUDO_LINUX
inline constexpr bool archSupportsMemoryTagging() { return true; }
#else
inline constexpr bool archSupportsMemoryTagging() { return false; }
#endif
inline constexpr uptr archMemoryTagGranuleSize() { return 16; }
inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
@@ -128,8 +120,6 @@ inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
return TaggedPtr;
}
inline uptr addFixedTag(uptr Ptr, uptr Tag) { return Ptr | (Tag << 56); }
inline uptr storeTags(uptr Begin, uptr End) {
DCHECK(Begin % 16 == 0);
if (Begin != End) {
@@ -255,12 +245,6 @@ inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
UNREACHABLE("memory tagging not supported");
}
inline uptr addFixedTag(uptr Ptr, uptr Tag) {
(void)Ptr;
(void)Tag;
UNREACHABLE("memory tagging not supported");
}
inline uptr storeTags(uptr Begin, uptr End) {
(void)Begin;
(void)End;
@@ -296,10 +280,6 @@ inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
*TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
}
inline void *untagPointer(void *Ptr) {
return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
}
template <typename Config>
inline constexpr bool allocatorSupportsMemoryTagging() {
return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging;

View File

@@ -60,7 +60,7 @@ public:
void initLinkerInitialized(s32 ReleaseToOsInterval) {
// Reserve the space required for the Primary.
PrimaryBase = reinterpret_cast<uptr>(
map(nullptr, PrimarySize, nullptr, MAP_NOACCESS, &Data));
map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
u32 Seed;
const u64 Time = getMonotonicTime();

View File

@@ -28,8 +28,7 @@ namespace LargeBlock {
struct Header {
LargeBlock::Header *Prev;
LargeBlock::Header *Next;
uptr CommitBase;
uptr CommitSize;
uptr BlockEnd;
uptr MapBase;
uptr MapSize;
[[no_unique_address]] MapPlatformData Data;
@@ -39,42 +38,29 @@ constexpr uptr getHeaderSize() {
return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}
template <typename Config> static uptr addHeaderTag(uptr Ptr) {
if (allocatorSupportsMemoryTagging<Config>())
return addFixedTag(Ptr, 1);
return Ptr;
static Header *getHeader(uptr Ptr) {
return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}
template <typename Config> static Header *getHeader(uptr Ptr) {
return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr) -
getHeaderSize());
}
template <typename Config> static Header *getHeader(const void *Ptr) {
return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
static Header *getHeader(const void *Ptr) {
return getHeader(reinterpret_cast<uptr>(Ptr));
}
} // namespace LargeBlock
static void unmap(LargeBlock::Header *H) {
MapPlatformData Data = H->Data;
unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
}
class MapAllocatorNoCache {
public:
void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
void init(UNUSED s32 ReleaseToOsInterval) {}
bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H,
UNUSED bool *Zeroed) {
return false;
}
void store(UNUSED Options Options, UNUSED LargeBlock::Header *H) { unmap(H); }
bool store(UNUSED LargeBlock::Header *H) { return false; }
bool canCache(UNUSED uptr Size) { return false; }
void disable() {}
void enable() {}
void releaseToOS() {}
void disableMemoryTagging() {}
bool setOption(Option O, UNUSED sptr Value) {
if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
O == Option::MaxCacheEntrySize)
@@ -84,8 +70,6 @@ public:
}
};
static const uptr MaxUnusedCachePages = 4U;
template <typename Config> class MapAllocatorCache {
public:
// Ensure the default maximum specified fits the array.
@@ -105,147 +89,67 @@ public:
initLinkerInitialized(ReleaseToOsInterval);
}
void store(Options Options, LargeBlock::Header *H) {
if (!canCache(H->CommitSize))
return unmap(H);
bool store(LargeBlock::Header *H) {
bool EntryCached = false;
bool EmptyCache = false;
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
const u64 Time = getMonotonicTime();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
CachedBlock Entry;
Entry.CommitBase = H->CommitBase;
Entry.CommitSize = H->CommitSize;
Entry.MapBase = H->MapBase;
Entry.MapSize = H->MapSize;
Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
Entry.Data = H->Data;
Entry.Time = Time;
if (useMemoryTagging<Config>(Options)) {
if (Interval == 0 && !SCUDO_FUCHSIA) {
// Release the memory and make it inaccessible at the same time by
// creating a new MAP_NOACCESS mapping on top of the existing mapping.
// Fuchsia does not support replacing mappings by creating a new mapping
// on top so we just do the two syscalls there.
Entry.Time = 0;
map(reinterpret_cast<void *>(Entry.CommitBase), Entry.CommitSize,
"scudo:secondary", MAP_RESIZABLE | MAP_NOACCESS | MAP_MEMTAG,
&Entry.Data);
} else {
setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
&Entry.Data);
}
} else if (Interval == 0) {
releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
Entry.Time = 0;
}
do {
{
ScopedLock L(Mutex);
if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
// If we get here then memory tagging was disabled in between when we
// read Options and when we locked Mutex. We can't insert our entry into
// the quarantine or the cache because the permissions would be wrong so
// just unmap it.
break;
}
if (Config::SecondaryCacheQuarantineSize &&
useMemoryTagging<Config>(Options)) {
QuarantinePos =
(QuarantinePos + 1) % Config::SecondaryCacheQuarantineSize;
if (!Quarantine[QuarantinePos].CommitBase) {
Quarantine[QuarantinePos] = Entry;
return;
}
CachedBlock PrevEntry = Quarantine[QuarantinePos];
Quarantine[QuarantinePos] = Entry;
if (OldestTime == 0)
OldestTime = Entry.Time;
Entry = PrevEntry;
}
if (EntriesCount >= MaxCount) {
if (IsFullEvents++ == 4U)
EmptyCache = true;
} else {
for (u32 I = 0; I < MaxCount; I++) {
if (Entries[I].CommitBase)
if (Entries[I].Block)
continue;
if (I != 0)
Entries[I] = Entries[0];
Entries[0] = Entry;
Entries[0].Block = reinterpret_cast<uptr>(H);
Entries[0].BlockEnd = H->BlockEnd;
Entries[0].MapBase = H->MapBase;
Entries[0].MapSize = H->MapSize;
Entries[0].Data = H->Data;
Entries[0].Time = Time;
EntriesCount++;
if (OldestTime == 0)
OldestTime = Entry.Time;
EntryCached = true;
break;
}
}
} while (0);
}
s32 Interval;
if (EmptyCache)
empty();
else if (Interval >= 0)
else if ((Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs)) >= 0)
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
if (!EntryCached)
unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
&Entry.Data);
return EntryCached;
}
bool retrieve(Options Options, uptr Size, uptr Alignment,
LargeBlock::Header **H, bool *Zeroed) {
bool retrieve(uptr Size, LargeBlock::Header **H, bool *Zeroed) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
bool Found = false;
CachedBlock Entry;
uptr HeaderPos;
{
ScopedLock L(Mutex);
if (EntriesCount == 0)
return false;
for (u32 I = 0; I < MaxCount; I++) {
const uptr CommitBase = Entries[I].CommitBase;
if (!CommitBase)
continue;
const uptr CommitSize = Entries[I].CommitSize;
const uptr AllocPos =
roundDownTo(CommitBase + CommitSize - Size, Alignment);
HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
continue;
Found = true;
Entry = Entries[I];
Entries[I].CommitBase = 0;
break;
}
}
if (Found) {
*H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(HeaderPos));
*Zeroed = Entry.Time == 0;
if (useMemoryTagging<Config>(Options))
setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
if (useMemoryTagging<Config>(Options)) {
if (*Zeroed)
storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
NewBlockBegin);
else if (Entry.BlockBegin < NewBlockBegin)
storeTags(Entry.BlockBegin, NewBlockBegin);
else
storeTags(untagPointer(NewBlockBegin),
untagPointer(Entry.BlockBegin));
}
(*H)->CommitBase = Entry.CommitBase;
(*H)->CommitSize = Entry.CommitSize;
(*H)->MapBase = Entry.MapBase;
(*H)->MapSize = Entry.MapSize;
(*H)->Data = Entry.Data;
ScopedLock L(Mutex);
if (EntriesCount == 0)
return false;
for (u32 I = 0; I < MaxCount; I++) {
if (!Entries[I].Block)
continue;
const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
if (Size > BlockSize)
continue;
if (Size < BlockSize - PageSize * 4U)
continue;
*H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
*Zeroed = Entries[I].Time == 0;
Entries[I].Block = 0;
(*H)->BlockEnd = Entries[I].BlockEnd;
(*H)->MapBase = Entries[I].MapBase;
(*H)->MapSize = Entries[I].MapSize;
(*H)->Data = Entries[I].Data;
EntriesCount--;
return true;
}
return Found;
return false;
}
bool canCache(uptr Size) {
@@ -277,23 +181,6 @@ public:
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
void disableMemoryTagging() {
ScopedLock L(Mutex);
for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
if (Quarantine[I].CommitBase) {
unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
Quarantine[I].CommitBase = 0;
}
}
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
for (u32 I = 0; I < MaxCount; I++)
if (Entries[I].CommitBase)
setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
&Entries[I].Data);
QuarantinePos = -1U;
}
void disable() { Mutex.lock(); }
void enable() { Mutex.unlock(); }
@@ -309,12 +196,12 @@ private:
{
ScopedLock L(Mutex);
for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
if (!Entries[I].CommitBase)
if (!Entries[I].Block)
continue;
MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
MapInfo[N].MapSize = Entries[I].MapSize;
MapInfo[N].Data = Entries[I].Data;
Entries[I].CommitBase = 0;
Entries[I].Block = 0;
N++;
}
EntriesCount = 0;
@@ -325,50 +212,37 @@ private:
&MapInfo[I].Data);
}
void releaseOlderThan(u64 Time) {
ScopedLock L(Mutex);
if (!EntriesCount)
return;
for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
continue;
releasePagesToOS(Entries[I].Block, 0,
Entries[I].BlockEnd - Entries[I].Block,
&Entries[I].Data);
Entries[I].Time = 0;
}
}
struct CachedBlock {
uptr CommitBase;
uptr CommitSize;
uptr Block;
uptr BlockEnd;
uptr MapBase;
uptr MapSize;
uptr BlockBegin;
[[no_unique_address]] MapPlatformData Data;
u64 Time;
};
void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
if (!Entry.CommitBase || !Entry.Time)
return;
if (Entry.Time > Time) {
if (OldestTime == 0 || Entry.Time < OldestTime)
OldestTime = Entry.Time;
return;
}
releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
Entry.Time = 0;
}
void releaseOlderThan(u64 Time) {
ScopedLock L(Mutex);
if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
return;
OldestTime = 0;
for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
releaseIfOlderThan(Quarantine[I], Time);
for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
releaseIfOlderThan(Entries[I], Time);
}
HybridMutex Mutex;
CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
u32 EntriesCount;
u32 QuarantinePos;
atomic_u32 MaxEntriesCount;
atomic_uptr MaxEntrySize;
u64 OldestTime;
uptr LargestSize;
u32 IsFullEvents;
atomic_s32 ReleaseToOsIntervalMs;
CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize];
};
template <typename Config> class MapAllocator {
@@ -384,15 +258,13 @@ public:
initLinkerInitialized(S, ReleaseToOsInterval);
}
void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
uptr *BlockEnd = nullptr,
void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
FillContentsMode FillContents = NoFill);
void deallocate(Options Options, void *Ptr);
void deallocate(void *Ptr);
static uptr getBlockEnd(void *Ptr) {
auto *B = LargeBlock::getHeader<Config>(Ptr);
return B->CommitBase + B->CommitSize;
return LargeBlock::getHeader(Ptr)->BlockEnd;
}
static uptr getBlockSize(void *Ptr) {
@@ -412,12 +284,8 @@ public:
}
template <typename F> void iterateOverBlocks(F Callback) const {
for (const auto &H : InUseBlocks) {
uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
Callback(Ptr);
}
for (const auto &H : InUseBlocks)
Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
}
uptr canCache(uptr Size) { return Cache.canCache(Size); }
@ -426,8 +294,6 @@ public:
void releaseToOS() { Cache.releaseToOS(); }
void disableMemoryTagging() { Cache.disableMemoryTagging(); }
private:
typename Config::SecondaryCache Cache;
@@ -453,33 +319,26 @@ private:
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
uptr *BlockEndPtr,
void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
uptr *BlockEnd,
FillContentsMode FillContents) {
Alignment = Max(Alignment, 1UL << SCUDO_MIN_ALIGNMENT_LOG);
DCHECK_GE(Size, AlignmentHint);
const uptr PageSize = getPageSizeCached();
uptr RoundedSize =
roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
Chunk::getHeaderSize(),
PageSize);
if (Alignment > PageSize)
RoundedSize += Alignment - PageSize;
const uptr RoundedSize =
roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
LargeBlock::Header *H;
bool Zeroed;
if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
const uptr BlockEnd = H->CommitBase + H->CommitSize;
if (BlockEndPtr)
*BlockEndPtr = BlockEnd;
uptr PtrInt = reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize();
if (allocatorSupportsMemoryTagging<Config>())
PtrInt = untagPointer(PtrInt);
void *Ptr = reinterpret_cast<void *>(PtrInt);
if (Cache.retrieve(RoundedSize, &H, &Zeroed)) {
if (BlockEnd)
*BlockEnd = H->BlockEnd;
void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
LargeBlock::getHeaderSize());
if (FillContents && !Zeroed)
memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
BlockEnd - PtrInt);
const uptr BlockSize = BlockEnd - reinterpret_cast<uptr>(H);
H->BlockEnd - reinterpret_cast<uptr>(Ptr));
const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
@@ -494,8 +353,9 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
MapPlatformData Data = {};
const uptr MapSize = RoundedSize + 2 * PageSize;
uptr MapBase = reinterpret_cast<uptr>(
map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
uptr MapBase =
reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
if (UNLIKELY(!MapBase))
return nullptr;
uptr CommitBase = MapBase + PageSize;
@@ -503,11 +363,11 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
// In the unlikely event of alignments larger than a page, adjust the amount
// of memory we want to commit, and trim the extra memory.
if (UNLIKELY(Alignment >= PageSize)) {
if (UNLIKELY(AlignmentHint >= PageSize)) {
// For alignments greater than or equal to a page, the user pointer (eg: the
// pointer that is returned by the C or C++ allocation APIs) ends up on a
// page boundary , and our headers will live in the preceding page.
CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
@@ -516,8 +376,9 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
MapBase = NewMapBase;
}
const uptr NewMapEnd =
CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
const uptr NewMapEnd = CommitBase + PageSize +
roundUpTo((Size - AlignmentHint), PageSize) +
PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
@@ -526,34 +387,16 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
"scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG, &Data);
map(reinterpret_cast<void *>(UntaggedPos),
CommitBase + CommitSize - UntaggedPos, "scudo:secondary", MAP_RESIZABLE,
&Data);
} else {
map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0),
&Data);
}
const uptr HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(HeaderPos));
if (useMemoryTagging<Config>(Options))
storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
reinterpret_cast<uptr>(H + 1));
const uptr Ptr = reinterpret_cast<uptr>(
map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
MAP_RESIZABLE, &Data));
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
H->MapBase = MapBase;
H->MapSize = MapEnd - MapBase;
H->CommitBase = CommitBase;
H->CommitSize = CommitSize;
H->BlockEnd = CommitBase + CommitSize;
H->Data = Data;
if (BlockEndPtr)
*BlockEndPtr = CommitBase + CommitSize;
if (BlockEnd)
*BlockEnd = CommitBase + CommitSize;
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
@ -564,13 +407,13 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
Stats.add(StatAllocated, CommitSize);
Stats.add(StatMapped, H->MapSize);
}
return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}
template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
const uptr CommitSize = H->CommitSize;
template <typename Config> void MapAllocator<Config>::deallocate(void *Ptr) {
LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
const uptr Block = reinterpret_cast<uptr>(H);
const uptr CommitSize = H->BlockEnd - Block;
{
ScopedLock L(Mutex);
InUseBlocks.remove(H);
@@ -579,7 +422,12 @@ void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
Stats.sub(StatAllocated, CommitSize);
Stats.sub(StatMapped, H->MapSize);
}
Cache.store(Options, H);
if (Cache.canCache(CommitSize) && Cache.store(H))
return;
void *Addr = reinterpret_cast<void *>(H->MapBase);
const uptr Size = H->MapSize;
MapPlatformData Data = H->Data;
unmap(Addr, Size, UNMAP_ALL, &Data);
}
template <typename Config>

View File

@@ -44,37 +44,39 @@ bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
return AllocatorT::PrimaryT::canAllocate(NeededSize);
}
template <class AllocatorT>
bool isTaggedAllocation(AllocatorT *Allocator, scudo::uptr Size,
scudo::uptr Alignment) {
return Allocator->useMemoryTaggingTestOnly() &&
scudo::systemDetectsMemoryTagFaultsTestOnly() &&
isPrimaryAllocation<AllocatorT>(Size, Alignment);
}
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
scudo::uptr Alignment) {
const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
Size = scudo::roundUpTo(Size, MinAlignment);
if (Allocator->useMemoryTaggingTestOnly())
EXPECT_DEATH(
{
disableDebuggerdMaybe();
reinterpret_cast<char *>(P)[-1] = 0xaa;
},
"");
if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
? Allocator->useMemoryTaggingTestOnly()
: Alignment == MinAlignment) {
EXPECT_DEATH(
{
disableDebuggerdMaybe();
reinterpret_cast<char *>(P)[Size] = 0xaa;
},
"");
}
if (!isTaggedAllocation(Allocator, Size, Alignment))
return;
Size = scudo::roundUpTo(Size, scudo::archMemoryTagGranuleSize());
EXPECT_DEATH(
{
disableDebuggerdMaybe();
reinterpret_cast<char *>(P)[-1] = 0xaa;
},
"");
EXPECT_DEATH(
{
disableDebuggerdMaybe();
reinterpret_cast<char *>(P)[Size] = 0xaa;
},
"");
}
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
TestAllocator() {
this->reset();
this->initThreadMaybe();
if (scudo::archSupportsMemoryTagging() &&
!scudo::systemDetectsMemoryTagFaultsTestOnly())
this->disableMemoryTagging();
}
~TestAllocator() { this->unmapTestOnly(); }
};
@@ -178,8 +180,8 @@ template <class Config> static void testAllocator() {
bool Found = false;
for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
void *P = Allocator->allocate(NeedleSize, Origin);
if (Allocator->getHeaderTaggedPointer(P) ==
Allocator->getHeaderTaggedPointer(NeedleP))
if (Allocator->untagPointerMaybe(P) ==
Allocator->untagPointerMaybe(NeedleP))
Found = true;
Allocator->deallocate(P, Origin);
}
@@ -246,30 +248,38 @@ template <class Config> static void testAllocator() {
Allocator->releaseToOS();
// Check that use-after-free is detected.
for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
const scudo::uptr Size = 1U << SizeLog;
if (!Allocator->useMemoryTaggingTestOnly())
continue;
EXPECT_DEATH(
{
disableDebuggerdMaybe();
void *P = Allocator->allocate(Size, Origin);
Allocator->deallocate(P, Origin);
reinterpret_cast<char *>(P)[0] = 0xaa;
},
"");
EXPECT_DEATH(
{
disableDebuggerdMaybe();
void *P = Allocator->allocate(Size, Origin);
Allocator->deallocate(P, Origin);
reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
},
"");
}
if (Allocator->useMemoryTaggingTestOnly() &&
scudo::systemDetectsMemoryTagFaultsTestOnly()) {
// Check that use-after-free is detected.
for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
const scudo::uptr Size = 1U << SizeLog;
if (!isTaggedAllocation(Allocator.get(), Size, 1))
continue;
// UAF detection is probabilistic, so we repeat the test up to 256 times
// if necessary. With 15 possible tags this means a 1 in 15^256 chance of
// a false positive.
EXPECT_DEATH(
{
disableDebuggerdMaybe();
for (unsigned I = 0; I != 256; ++I) {
void *P = Allocator->allocate(Size, Origin);
Allocator->deallocate(P, Origin);
reinterpret_cast<char *>(P)[0] = 0xaa;
}
},
"");
EXPECT_DEATH(
{
disableDebuggerdMaybe();
for (unsigned I = 0; I != 256; ++I) {
void *P = Allocator->allocate(Size, Origin);
Allocator->deallocate(P, Origin);
reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
}
},
"");
}
if (Allocator->useMemoryTaggingTestOnly()) {
// Check that disabling memory tagging works correctly.
void *P = Allocator->allocate(2048, Origin);
EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
@@ -279,7 +289,7 @@ template <class Config> static void testAllocator() {
Allocator->deallocate(P, Origin);
P = Allocator->allocate(2048, Origin);
EXPECT_EQ(scudo::untagPointer(P), P);
EXPECT_EQ(Allocator->untagPointerMaybe(P), P);
reinterpret_cast<char *>(P)[2048] = 0xaa;
Allocator->deallocate(P, Origin);

View File

@@ -27,29 +27,29 @@ template <typename Config> static void testSecondaryBasic(void) {
std::unique_ptr<SecondaryT> L(new SecondaryT);
L->init(&S);
const scudo::uptr Size = 1U << 16;
void *P = L->allocate(scudo::Options{}, Size);
void *P = L->allocate(Size);
EXPECT_NE(P, nullptr);
memset(P, 'A', Size);
EXPECT_GE(SecondaryT::getBlockSize(P), Size);
L->deallocate(scudo::Options{}, P);
L->deallocate(P);
// If the Secondary can't cache that pointer, it will be unmapped.
if (!L->canCache(Size))
EXPECT_DEATH(memset(P, 'A', Size), "");
const scudo::uptr Align = 1U << 16;
P = L->allocate(scudo::Options{}, Size + Align, Align);
P = L->allocate(Size + Align, Align);
EXPECT_NE(P, nullptr);
void *AlignedP = reinterpret_cast<void *>(
scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
memset(AlignedP, 'A', Size);
L->deallocate(scudo::Options{}, P);
L->deallocate(P);
std::vector<void *> V;
for (scudo::uptr I = 0; I < 32U; I++)
V.push_back(L->allocate(scudo::Options{}, Size));
V.push_back(L->allocate(Size));
std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
while (!V.empty()) {
L->deallocate(scudo::Options{}, V.back());
L->deallocate(V.back());
V.pop_back();
}
scudo::ScopedString Str(1024);
@@ -59,14 +59,11 @@ template <typename Config> static void testSecondaryBasic(void) {
struct NoCacheConfig {
typedef scudo::MapAllocatorNoCache SecondaryCache;
static const bool MaySupportMemoryTagging = false;
};
struct TestConfig {
typedef scudo::MapAllocatorCache<TestConfig> SecondaryCache;
static const bool MaySupportMemoryTagging = false;
static const scudo::u32 SecondaryCacheEntriesArraySize = 128U;
static const scudo::u32 SecondaryCacheQuarantineSize = 0U;
static const scudo::u32 SecondaryCacheDefaultMaxEntriesCount = 64U;
static const scudo::uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 20;
static const scudo::s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
@@ -100,12 +97,12 @@ TEST(ScudoSecondaryTest, SecondaryCombinations) {
scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
const scudo::uptr Size =
HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
void *P = L->allocate(scudo::Options{}, Size, Align);
void *P = L->allocate(Size, Align);
EXPECT_NE(P, nullptr);
void *AlignedP = reinterpret_cast<void *>(
scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
memset(AlignedP, 0xff, UserSize);
L->deallocate(scudo::Options{}, P);
L->deallocate(P);
}
}
}
@@ -120,7 +117,7 @@ TEST(ScudoSecondaryTest, SecondaryIterate) {
std::vector<void *> V;
const scudo::uptr PageSize = scudo::getPageSizeCached();
for (scudo::uptr I = 0; I < 32U; I++)
V.push_back(L->allocate(scudo::Options{}, (std::rand() % 16) * PageSize));
V.push_back(L->allocate((std::rand() % 16) * PageSize));
auto Lambda = [V](scudo::uptr Block) {
EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
V.end());
@@ -129,7 +126,7 @@ TEST(ScudoSecondaryTest, SecondaryIterate) {
L->iterateOverBlocks(Lambda);
L->enable();
while (!V.empty()) {
L->deallocate(scudo::Options{}, V.back());
L->deallocate(V.back());
V.pop_back();
}
scudo::ScopedString Str(1024);
@@ -175,14 +172,14 @@ static void performAllocations(LargeAllocator *L) {
for (scudo::uptr I = 0; I < 128U; I++) {
// Deallocate 75% of the blocks.
const bool Deallocate = (rand() & 3) != 0;
void *P = L->allocate(scudo::Options{}, (std::rand() % 16) * PageSize);
void *P = L->allocate((std::rand() % 16) * PageSize);
if (Deallocate)
L->deallocate(scudo::Options{}, P);
L->deallocate(P);
else
V.push_back(P);
}
while (!V.empty()) {
L->deallocate(scudo::Options{}, V.back());
L->deallocate(V.back());
V.pop_back();
}
}

View File

@@ -36,13 +36,6 @@ template <class Allocator> struct TSDRegistryExT {
initLinkerInitialized(Instance);
}
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
initLinkerInitialized(Instance); // Sets Initialized.
}
void unmapTestOnly() {}
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
@@ -87,6 +80,13 @@ template <class Allocator> struct TSDRegistryExT {
bool getDisableMemInit() { return State.DisableMemInit; }
private:
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
initLinkerInitialized(Instance); // Sets Initialized.
}
// Using minimal initialization allows for global initialization while keeping
// the thread specific structure untouched. The fallback structure will be
// used instead.

View File

@@ -38,13 +38,6 @@ struct TSDRegistrySharedT {
initLinkerInitialized(Instance);
}
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
initLinkerInitialized(Instance); // Sets Initialized.
}
void unmapTestOnly() { setCurrentTSD(nullptr); }
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
@@ -146,6 +139,13 @@ private:
*getTlsPtr() |= B;
}
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
initLinkerInitialized(Instance); // Sets Initialized.
}
NOINLINE void initThread(Allocator *Instance) {
initOnceMaybe(Instance);
// Initial context assignment is done in a plain round-robin fashion.