[Sanitizers] 64 bit allocator respects allocator_may_return_null flag
Summary:
Make SizeClassAllocator64 return nullptr when it encounters OOM, which allows
the sanitizer's allocator as a whole to follow the allocator_may_return_null=1
policy (LargeMmapAllocator: D34243, SizeClassAllocator64: D34433).

Reviewers: eugenis

Subscribers: srhines, kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34540

llvm-svn: 306342
commit 01676883cd (parent 76bf48d932)
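Before the diff, a minimal standalone sketch (not part of this patch) of the allocator_may_return_null behavior the summary describes: with the flag set to 1, an out-of-memory allocation should surface to the caller as nullptr instead of killing the process. FakePrimaryAllocator and AllocateRespectingFlag are hypothetical stand-ins, not sanitizer classes.

// Hedged illustration only; the types and names here are made up, not the
// sanitizer's own classes.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct FakePrimaryAllocator {
  // Mimics the new contract of SizeClassAllocator64: return nullptr on OOM
  // instead of aborting.
  void *Allocate(size_t size) { return malloc(size); }
};

void *AllocateRespectingFlag(FakePrimaryAllocator &a, size_t size,
                             bool allocator_may_return_null) {
  void *p = a.Allocate(size);
  if (!p) {
    if (allocator_may_return_null)
      return nullptr;  // allocator_may_return_null=1: hand the failure to the user.
    fprintf(stderr, "allocator is out of memory, dying\n");
    abort();           // allocator_may_return_null=0: OOM stays fatal.
  }
  return p;
}

int main() {
  FakePrimaryAllocator a;
  void *p = AllocateRespectingFlag(a, 64, /*allocator_may_return_null=*/true);
  printf("allocation %s\n", p ? "succeeded" : "returned nullptr");
  free(p);
  return 0;
}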
@@ -46,8 +46,10 @@ struct SizeClassAllocator64LocalCache {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(c, allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(c, allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     CHECK_GT(c->count, 0);
     CompactPtrT chunk = c->chunks[--c->count];
@@ -101,13 +103,15 @@ struct SizeClassAllocator64LocalCache {
     }
   }

-  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                        uptr class_id) {
     InitCache();
     uptr num_requested_chunks = c->max_count / 2;
-    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
-                                num_requested_chunks);
+    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+                                              num_requested_chunks)))
+      return false;
     c->count = num_requested_chunks;
+    return true;
   }

   NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
@@ -80,7 +80,7 @@ class SizeClassAllocator64 {
       CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
-    MapWithCallback(SpaceEnd(), AdditionalSize());
+    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
   }

   s32 ReleaseToOSIntervalMs() const {
@@ -92,16 +92,6 @@ class SizeClassAllocator64 {
                  memory_order_relaxed);
   }

-  void MapWithCallback(uptr beg, uptr size) {
-    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
-    MapUnmapCallback().OnMap(beg, size);
-  }
-
-  void UnmapWithCallback(uptr beg, uptr size) {
-    MapUnmapCallback().OnUnmap(beg, size);
-    UnmapOrDie(reinterpret_cast<void *>(beg), size);
-  }
-
   static bool CanAllocate(uptr size, uptr alignment) {
     return size <= SizeClassMap::kMaxSize &&
       alignment <= SizeClassMap::kMaxSize;
@@ -116,16 +106,20 @@ class SizeClassAllocator64 {
     BlockingMutexLock l(&region->mutex);
     uptr old_num_chunks = region->num_freed_chunks;
     uptr new_num_freed_chunks = old_num_chunks + n_chunks;
-    EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+    // Failure to allocate free array space while releasing memory is non
+    // recoverable.
+    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
+                                       new_num_freed_chunks)))
+      DieOnFailure::OnOOM();
     for (uptr i = 0; i < n_chunks; i++)
       free_array[old_num_chunks + i] = chunks[i];
     region->num_freed_chunks = new_num_freed_chunks;
-    region->n_freed += n_chunks;
+    region->stats.n_freed += n_chunks;

     MaybeReleaseToOS(class_id);
   }

-  NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                  CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -133,18 +127,19 @@ class SizeClassAllocator64 {

     BlockingMutexLock l(&region->mutex);
     if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
-      PopulateFreeArray(stat, class_id, region,
-                        n_chunks - region->num_freed_chunks);
+      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
+                                      n_chunks - region->num_freed_chunks)))
+        return false;
       CHECK_GE(region->num_freed_chunks, n_chunks);
     }
     region->num_freed_chunks -= n_chunks;
     uptr base_idx = region->num_freed_chunks;
     for (uptr i = 0; i < n_chunks; i++)
       chunks[i] = free_array[base_idx + i];
-    region->n_allocated += n_chunks;
+    region->stats.n_allocated += n_chunks;
+    return true;
   }

   bool PointerIsMine(const void *p) {
     uptr P = reinterpret_cast<uptr>(p);
     if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
@@ -211,7 +206,7 @@ class SizeClassAllocator64 {

   // Test-only.
   void TestOnlyUnmap() {
-    UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+    UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
   }

   static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
@@ -224,15 +219,15 @@ class SizeClassAllocator64 {
   void PrintStats(uptr class_id, uptr rss) {
     RegionInfo *region = GetRegionInfo(class_id);
     if (region->mapped_user == 0) return;
-    uptr in_use = region->n_allocated - region->n_freed;
+    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
     uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
     Printf(
-        " %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
         "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd\n",
-        class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
-        region->n_allocated, region->n_freed, in_use,
-        region->num_freed_chunks, avail_chunks, rss >> 10,
-        region->rtoi.num_releases);
+        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
+        region->mapped_user >> 10, region->stats.n_allocated,
+        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
+        rss >> 10, region->rtoi.num_releases);
   }

   void PrintStats() {
@@ -242,8 +237,8 @@ class SizeClassAllocator64 {
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       RegionInfo *region = GetRegionInfo(class_id);
       total_mapped += region->mapped_user;
-      n_allocated += region->n_allocated;
-      n_freed += region->n_freed;
+      n_allocated += region->stats.n_allocated;
+      n_freed += region->stats.n_freed;
     }
     Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
            "remains %zd\n",
@@ -326,6 +321,11 @@ class SizeClassAllocator64 {

   atomic_sint32_t release_to_os_interval_ms_;

+  struct Stats {
+    uptr n_allocated;
+    uptr n_freed;
+  };
+
   struct ReleaseToOsInfo {
     uptr n_freed_at_last_release;
     uptr num_releases;
@@ -341,7 +341,8 @@ class SizeClassAllocator64 {
     uptr mapped_user;  // Bytes mapped for user memory.
     uptr mapped_meta;  // Bytes mapped for metadata.
     u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
-    uptr n_allocated, n_freed;  // Just stats.
+    bool exhausted;  // Whether region is out of space for new chunks.
+    Stats stats;
     ReleaseToOsInfo rtoi;
   };
   COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
@@ -386,7 +387,26 @@ class SizeClassAllocator64 {
                                       kFreeArraySize);
   }

-  void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+  bool MapWithCallback(uptr beg, uptr size) {
+    uptr mapped = reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(beg, size));
+    if (!mapped)
+      return false;
+    CHECK_EQ(beg, mapped);
+    MapUnmapCallback().OnMap(beg, size);
+    return true;
+  }
+
+  void MapWithCallbackOrDie(uptr beg, uptr size) {
+    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+    MapUnmapCallback().OnMap(beg, size);
+  }
+
+  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+    MapUnmapCallback().OnUnmap(beg, size);
+    UnmapOrDie(reinterpret_cast<void *>(beg), size);
+  }
+
+  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                             uptr num_freed_chunks) {
     uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
     if (region->mapped_free_array < needed_space) {
@@ -395,66 +415,87 @@ class SizeClassAllocator64 {
       uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                              region->mapped_free_array;
       uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
-      MapWithCallback(current_map_end, new_map_size);
+      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
+        return false;
       region->mapped_free_array = new_mapped_free_array;
     }
+    return true;
   }

-  NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                   RegionInfo *region, uptr requested_count) {
     // region->mutex is held.
-    uptr size = ClassIdToSize(class_id);
-    uptr beg_idx = region->allocated_user;
-    uptr end_idx = beg_idx + requested_count * size;
-    uptr region_beg = GetRegionBeginBySizeClass(class_id);
-    if (end_idx > region->mapped_user) {
+    const uptr size = ClassIdToSize(class_id);
+    const uptr new_space_beg = region->allocated_user;
+    const uptr new_space_end = new_space_beg + requested_count * size;
+    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
+
+    // Map more space for chunks, if necessary.
+    if (new_space_end > region->mapped_user) {
       if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
         region->rand_state = static_cast<u32>(region_beg >> 12);  // From ASLR.
       // Do the mmap for the user memory.
       uptr map_size = kUserMapSize;
-      while (end_idx > region->mapped_user + map_size)
+      while (new_space_end > region->mapped_user + map_size)
         map_size += kUserMapSize;
-      CHECK_GE(region->mapped_user + map_size, end_idx);
-      MapWithCallback(region_beg + region->mapped_user, map_size);
+      CHECK_GE(region->mapped_user + map_size, new_space_end);
+      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
+                                    map_size)))
+        return false;
       stat->Add(AllocatorStatMapped, map_size);
       region->mapped_user += map_size;
     }
-    CompactPtrT *free_array = GetFreeArray(region_beg);
-    uptr total_count = (region->mapped_user - beg_idx) / size;
-    uptr num_freed_chunks = region->num_freed_chunks;
-    EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
-    for (uptr i = 0; i < total_count; i++) {
-      uptr chunk = beg_idx + i * size;
-      free_array[num_freed_chunks + total_count - 1 - i] =
-          PointerToCompactPtr(0, chunk);
-    }
-    if (kRandomShuffleChunks)
-      RandomShuffle(&free_array[num_freed_chunks], total_count,
-                    &region->rand_state);
-    region->num_freed_chunks += total_count;
-    region->allocated_user += total_count * size;
-    CHECK_LE(region->allocated_user, region->mapped_user);
+    const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size;

-    region->allocated_meta += total_count * kMetadataSize;
-    if (region->allocated_meta > region->mapped_meta) {
-      uptr map_size = kMetaMapSize;
-      while (region->allocated_meta > region->mapped_meta + map_size)
-        map_size += kMetaMapSize;
-      // Do the mmap for the metadata.
-      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
-      MapWithCallback(GetMetadataEnd(region_beg) -
-                      region->mapped_meta - map_size, map_size);
-      region->mapped_meta += map_size;
-    }
-    CHECK_LE(region->allocated_meta, region->mapped_meta);
-    if (region->mapped_user + region->mapped_meta >
+    // Calculate the required space for metadata.
+    const uptr requested_allocated_meta =
+        region->allocated_meta + new_chunks_count * kMetadataSize;
+    uptr requested_mapped_meta = region->mapped_meta;
+    while (requested_allocated_meta > requested_mapped_meta)
+      requested_mapped_meta += kMetaMapSize;
+    // Check whether this size class is exhausted.
+    if (region->mapped_user + requested_mapped_meta >
         kRegionSize - kFreeArraySize) {
-      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
-      Printf("The process has exhausted %zuMB for size class %zu.\n",
-             kRegionSize / 1024 / 1024, size);
-      Die();
+      if (!region->exhausted) {
+        region->exhausted = true;
+        Printf("%s: Out of memory. ", SanitizerToolName);
+        Printf("The process has exhausted %zuMB for size class %zu.\n",
+               kRegionSize >> 20, size);
+      }
+      return false;
     }
+    // Map more space for metadata, if necessary.
+    if (requested_mapped_meta > region->mapped_meta) {
+      if (UNLIKELY(!MapWithCallback(
+          GetMetadataEnd(region_beg) - requested_mapped_meta,
+          requested_mapped_meta - region->mapped_meta)))
+        return false;
+      region->mapped_meta = requested_mapped_meta;
+    }
+
+    // If necessary, allocate more space for the free array and populate it with
+    // newly allocated chunks.
+    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
+    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
+      return false;
+    CompactPtrT *free_array = GetFreeArray(region_beg);
+    for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count;
+         i++, chunk += size)
+      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
+    if (kRandomShuffleChunks)
+      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
+                    &region->rand_state);
+
+    // All necessary memory is mapped and now it is safe to advance all
+    // 'allocated_*' counters.
+    region->num_freed_chunks += new_chunks_count;
+    region->allocated_user += new_chunks_count * size;
+    CHECK_LE(region->allocated_user, region->mapped_user);
+    region->allocated_meta = requested_allocated_meta;
+    CHECK_LE(region->allocated_meta, region->mapped_meta);
+    region->exhausted = false;
+
+    return true;
   }

   void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
@@ -478,8 +519,8 @@ class SizeClassAllocator64 {
     uptr n = region->num_freed_chunks;
     if (n * chunk_size < page_size)
       return;  // No chance to release anything.
-    if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
-        page_size) {
+    if ((region->stats.n_freed -
+         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
       return;  // Nothing new to release.
     }

@@ -508,7 +549,7 @@ class SizeClassAllocator64 {
         CHECK_GT(chunk - prev, scaled_chunk_size);
         if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
           MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
-          region->rtoi.n_freed_at_last_release = region->n_freed;
+          region->rtoi.n_freed_at_last_release = region->stats.n_freed;
           region->rtoi.num_releases++;
         }
         range_beg = chunk;
@@ -517,5 +558,3 @@ class SizeClassAllocator64 {
     }
   }
 };
-
-
@@ -92,6 +92,9 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                          const char *name = nullptr);
 void *MmapNoReserveOrDie(uptr size, const char *mem_type);
 void *MmapFixedOrDie(uptr fixed_addr, uptr size);
+// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
+// that case returns nullptr.
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
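As an aside, a hedged, self-contained sketch (not sanitizer code) of the tolerate-ENOMEM pattern that the MmapFixedOrDieOnFatalError entry point declared above follows on POSIX: ENOMEM becomes a recoverable nullptr, while any other failure stays fatal. The name MapFixedSketch is purely illustrative.

// Hedged sketch using plain POSIX mmap; not the sanitizer implementation.
#include <sys/mman.h>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static void *MapFixedSketch(void *fixed_addr, size_t size,
                            bool tolerate_enomem) {
  void *p = mmap(fixed_addr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (p == MAP_FAILED) {
    if (tolerate_enomem && errno == ENOMEM)
      return nullptr;  // "OrDieOnFatalError" flavor: report OOM to the caller.
    perror("mmap");    // "OrDie" flavor: any other failure is fatal.
    abort();
  }
  return p;
}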
@@ -198,7 +198,7 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
   return (void *)p;
 }

-void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) {
   uptr PageSize = GetPageSizeCached();
   uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                          RoundUpTo(size, PageSize),
@@ -207,6 +207,8 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
                          -1, 0);
   int reserrno;
   if (internal_iserror(p, &reserrno)) {
+    if (tolerate_enomem && reserrno == ENOMEM)
+      return nullptr;
     char mem_type[30];
     internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                       fixed_addr);
@@ -216,6 +218,14 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
   return (void *)p;
 }

+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/);
+}
+
 bool MprotectNoAccess(uptr addr, uptr size) {
   return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
 }
@@ -235,6 +235,18 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
   return p;
 }

+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+                         MEM_COMMIT, PAGE_READWRITE);
+  if (p == 0) {
+    char mem_type[30];
+    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+                      fixed_addr);
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+  }
+  return p;
+}
+
 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
   // FIXME: make this really NoReserve?
   return MmapOrDie(size, mem_type);
@@ -436,29 +436,30 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
 }

-template<class Allocator>
-void FailInAssertionOnOOM() {
-  Allocator a;
-  a.Init(kReleaseToOSIntervalNever);
-  SizeClassAllocatorLocalCache<Allocator> cache;
-  memset(&cache, 0, sizeof(cache));
-  cache.Init(0);
-  AllocatorStats stats;
-  stats.Init();
-  const size_t kNumChunks = 128;
-  uint32_t chunks[kNumChunks];
-  for (int i = 0; i < 1000000; i++) {
-    a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
-  }
-
-  a.TestOnlyUnmap();
-}
-
 // Don't test OOM conditions on Win64 because it causes other tests on the same
 // machine to OOM.
 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
-  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
+  Allocator64 a;
+  a.Init(kReleaseToOSIntervalNever);
+  SizeClassAllocatorLocalCache<Allocator64> cache;
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
+  AllocatorStats stats;
+  stats.Init();
+
+  const size_t kNumChunks = 128;
+  uint32_t chunks[kNumChunks];
+  bool allocation_failed = false;
+  for (int i = 0; i < 1000000; i++) {
+    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
+      allocation_failed = true;
+      break;
+    }
+  }
+  EXPECT_EQ(allocation_failed, true);
+
+  a.TestOnlyUnmap();
 }
 #endif

@@ -970,9 +971,9 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
   ASSERT_LT(2 * kAllocationSize, kRegionSize);
   ASSERT_GT(3 * kAllocationSize, kRegionSize);
-  cache.Allocate(a, kClassID);
-  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
-               "The process has exhausted");
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);

   const uptr Class2 = 100;
   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
@@ -980,11 +981,12 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   char *p[7];
   for (int i = 0; i < 7; i++) {
     p[i] = (char*)cache.Allocate(a, Class2);
+    EXPECT_NE(p[i], nullptr);
     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
     p[i][Size2 - 1] = 42;
     if (i) ASSERT_LT(p[i - 1], p[i]);
   }
-  EXPECT_DEATH(cache.Allocate(a, Class2), "The process has exhausted");
+  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
   cache.Deallocate(a, Class2, p[0]);
   cache.Drain(a);
   ASSERT_EQ(p[6][Size2 - 1], 42);