[Sanitizers] 32 bit allocator respects allocator_may_return_null flag
Summary:
Make SizeClassAllocator32 return nullptr when it encounters OOM, which lets
the sanitizer allocator as a whole follow the allocator_may_return_null=1
policy even for small allocations (LargeMmapAllocator was already fixed by
D34243). A test for OOM in the primary allocator will be added later, once
SizeClassAllocator64 can handle OOM gracefully as well.

Reviewers: eugenis

Subscribers: kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34433

llvm-svn: 305972
parent fe6414b043
commit f3cc7cc3d8
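
To make the effect of the flag concrete: with allocator_may_return_null=1, an allocation the sanitizer allocator cannot satisfy is expected to fail by returning null rather than aborting with an OOM report. The sketch below is only an illustration of that user-visible behaviour, not a test shipped with this commit; the oversized request size and the build/run lines are assumptions.

// Illustrative only; assumes an ASan-instrumented build, e.g.
//   clang++ -fsanitize=address may_return_null.cc -o may_return_null
//   ASAN_OPTIONS=allocator_may_return_null=1 ./may_return_null
#include <cstdio>
#include <cstdlib>

int main() {
  // A request far beyond what the allocator will satisfy; the exact size is
  // arbitrary. With allocator_may_return_null=1 the expected result is a
  // null pointer instead of a fatal sanitizer report.
  void *p = std::malloc(static_cast<std::size_t>(-1) / 2);
  if (!p) {
    std::printf("allocation failed, null returned as requested\n");
    return 0;
  }
  std::free(p);
  return 0;
}
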
compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h:
@@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     void *res = c->batch[--c->count];
     PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
     Deallocate(allocator, batch_class_id, b);
   }

-  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
     InitCache();
     PerClass *c = &per_class_[class_id];
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+    if (UNLIKELY(!b))
+      return false;
     CHECK_GT(b->Count(), 0);
     b->CopyToArray(c->batch);
     c->count = b->Count();
     DestroyBatch(class_id, allocator, b);
+    return true;
   }

   NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
@@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
     uptr first_idx_to_drain = c->count - cnt;
     TransferBatch *b = CreateBatch(
         class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non recoverable.
+    // TODO(alekseys): Figure out how to do it without allocating a new batch.
+    if (UNLIKELY(!b))
+      DieOnFailure::OnOOM();
     b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                     &c->batch[first_idx_to_drain], cnt);
     c->count -= cnt;

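The local-cache change above reduces to one control-flow pattern: Refill() now reports failure instead of assuming the backing allocator always succeeds, and Allocate() propagates that failure to its caller as nullptr. A stripped-down, self-contained illustration of the pattern (simplified, invented names; not the sanitizer code itself):

#include <utility>
#include <vector>

// Toy cache: Refill() returns false when the backing store is exhausted and
// Allocate() turns that into a nullptr result for the caller.
class ToyCache {
 public:
  explicit ToyCache(std::vector<void *> backing) : backing_(std::move(backing)) {}

  void *Allocate() {
    if (cached_.empty()) {
      if (!Refill())
        return nullptr;  // out of memory: let the caller decide what to do
    }
    void *res = cached_.back();
    cached_.pop_back();
    return res;
  }

 private:
  bool Refill() {
    if (backing_.empty())
      return false;  // nothing left to hand out
    cached_.push_back(backing_.back());
    backing_.pop_back();
    return true;
  }

  std::vector<void *> cached_;
  std::vector<void *> backing_;
};

int main() {
  int slot = 0;
  ToyCache cache({static_cast<void *>(&slot)});
  void *first = cache.Allocate();   // backing still has one entry: succeeds
  void *second = cache.Allocate();  // backing exhausted: Refill fails, nullptr
  return (first != nullptr && second == nullptr) ? 0 : 1;
}
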
compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h:
@@ -24,7 +24,8 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
 // be returned by MmapOrDie().
 //
 // Region:
-//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+//   kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -149,8 +150,9 @@ class SizeClassAllocator32 {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
-    if (sci->free_list.empty())
-      PopulateFreeList(stat, c, sci, class_id);
+    if (sci->free_list.empty() &&
+        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+      return nullptr;
     CHECK(!sci->free_list.empty());
     TransferBatch *b = sci->free_list.front();
     sci->free_list.pop_front();
@@ -277,8 +279,10 @@ class SizeClassAllocator32 {

   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
-                                                       "SizeClassAllocator32"));
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+        kRegionSize, kRegionSize, "SizeClassAllocator32"));
+    if (UNLIKELY(!res))
+      return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
     stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
@@ -291,16 +295,20 @@ class SizeClassAllocator32 {
     return &size_class_info_array[class_id];
   }

-  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                         SizeClassInfo *sci, uptr class_id) {
     uptr size = ClassIdToSize(class_id);
     uptr reg = AllocateRegion(stat, class_id);
+    if (UNLIKELY(!reg))
+      return false;
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = TransferBatch::MaxCached(class_id);
     TransferBatch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
       if (!b) {
         b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+        if (!b)
+          return false;
         b->Clear();
       }
       b->Add((void*)i);
@@ -314,6 +322,7 @@ class SizeClassAllocator32 {
       CHECK_GT(b->Count(), 0);
       sci->free_list.push_back(b);
     }
+    return true;
   }

   ByteMap possible_regions;

compiler-rt/lib/sanitizer_common/sanitizer_common.h:
@@ -95,7 +95,9 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size);
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Dies on all but out of memory errors, in the latter case returns nullptr.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type);
 // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
 // unaccessible memory.
 bool MprotectNoAccess(uptr addr, uptr size);

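The comment added to the declaration above states the contract shared by the ...OrDieOnFatalError functions: the only failure a caller ever observes is out of memory, reported as nullptr; any other mapping failure still terminates inside the function. A self-contained POSIX sketch of that contract (the helper name is invented; this is not the sanitizer implementation):

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>

// Returns nullptr only when the kernel reports out-of-memory; every other
// mmap failure is treated as fatal.
static void *MapOrReturnNullOnOOM(size_t size) {
  void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (errno == ENOMEM)
      return nullptr;     // recoverable: the caller handles OOM
    std::perror("mmap");  // anything else is a hard error
    std::abort();
  }
  return p;
}

int main() {
  const size_t kSize = 1 << 20;
  void *p = MapOrReturnNullOnOOM(kSize);
  if (!p) {
    std::printf("out of memory\n");
    return 1;
  }
  munmap(p, kSize);
  return 0;
}
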
compiler-rt/lib/sanitizer_common/sanitizer_posix.cc:
@@ -164,11 +164,14 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
 // We want to map a chunk of address space aligned to 'alignment'.
 // We do it by maping a bit more and then unmaping redundant pieces.
 // We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
-  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+  if (!map_res)
+    return nullptr;
   uptr map_end = map_res + map_size;
   uptr res = map_res;
   if (res & (alignment - 1))  // Not aligned.

compiler-rt/lib/sanitizer_common/sanitizer_win.cc:
@@ -131,18 +131,24 @@ void UnmapOrDie(void *addr, uptr size) {
   }
 }

+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+                                     const char *mmap_type) {
+  error_t last_error = GetLastError();
+  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+    return nullptr;
+  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
 void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
   void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (rv == 0) {
-    error_t last_error = GetLastError();
-    if (last_error != ERROR_NOT_ENOUGH_MEMORY)
-      ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
-  }
+  if (rv == 0)
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
   return rv;
 }

 // We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));

@@ -152,7 +158,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   uptr mapped_addr =
       (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   if (!mapped_addr)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

   // If we got it right on the first try, return. Otherwise, unmap it and go to
   // the slow path.
@@ -172,8 +178,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   mapped_addr =
       (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
   if (!mapped_addr)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
-                            GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

   // Find the aligned address.
   uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
@@ -191,7 +196,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {

   // Fail if we can't make this work quickly.
   if (retries == kMaxRetries && mapped_addr == 0)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

   return (void *)mapped_addr;
 }

compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cc:
@@ -72,12 +72,12 @@ TEST(SanitizerCommon, SortTest) {
   EXPECT_TRUE(IsSorted(array, 2));
 }

-TEST(SanitizerCommon, MmapAlignedOrDie) {
+TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
   uptr PageSize = GetPageSizeCached();
   for (uptr size = 1; size <= 32; size *= 2) {
     for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
       for (int iter = 0; iter < 100; iter++) {
-        uptr res = (uptr)MmapAlignedOrDie(
+        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
             size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
         EXPECT_EQ(0U, res % (alignment * PageSize));
         internal_memset((void*)res, 1, size * PageSize);