[Sanitizers] Move cached allocator_may_return_null flag to sanitizer_allocator

Summary:
Move the cached allocator_may_return_null flag to sanitizer_allocator.cc and
provide an API to consolidate and unify the behavior of all specific allocators.

Make all sanitizers that use CombinedAllocator follow the
AllocatorReturnNullOrDieOnOOM() rules so they behave the same way when OOM
happens.

When OOM happens, turn the allocator_out_of_memory flag on regardless of the
allocator_may_return_null flag value (it used to not be set when
allocator_may_return_null == true).

release_to_os_interval_ms and rss_limit_exceeded will likely be moved to
sanitizer_allocator.cc too (later).

Reviewers: eugenis

Subscribers: srhines, kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34310

llvm-svn: 305858

parent 91ef9de643
commit ccab11b0e8
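
For reference, a minimal standalone C++ sketch of the failure-handling scheme the
summary describes: two policy structs share one cached allocator_may_return_null
flag, and the OOM handler sets the out-of-memory flag regardless of that value.
The identifiers mirror the ones added in the patch below; the std::atomic /
std::abort plumbing here is illustrative only, not the sanitizer's own primitives.

#include <atomic>
#include <cstdio>
#include <cstdlib>

static std::atomic<bool> allocator_may_return_null{false};
static std::atomic<bool> allocator_out_of_memory{false};

void SetAllocatorMayReturnNull(bool v) { allocator_may_return_null.store(v); }
bool AllocatorMayReturnNull() { return allocator_may_return_null.load(); }
bool IsAllocatorOutOfMemory() { return allocator_out_of_memory.load(); }

[[noreturn]] void ReportAllocatorCannotReturnNull() {
  std::fprintf(stderr,
               "allocator is terminating the process instead of returning 0\n");
  std::abort();
}

// Returns null when allocator_may_return_null is set, dies otherwise.
struct ReturnNullOrDieOnFailure {
  static void *OnBadRequest() {
    if (AllocatorMayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull();
  }
  static void *OnOOM() {
    allocator_out_of_memory.store(true);  // set regardless of the flag
    if (AllocatorMayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull();
  }
};

// Always dies on failure (the patch uses this for the internal allocator).
struct DieOnFailure {
  static void *OnBadRequest() { ReportAllocatorCannotReturnNull(); }
  static void *OnOOM() {
    allocator_out_of_memory.store(true);
    ReportAllocatorCannotReturnNull();
  }
};

int main() {
  SetAllocatorMayReturnNull(true);
  void *p = ReturnNullOrDieOnFailure::OnOOM();  // returns null, OOM flag is set
  std::printf("p=%p oom=%d\n", p, IsAllocatorOutOfMemory());
  return 0;
}
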
@@ -266,7 +266,8 @@ struct Allocator {
   }
 
   void Initialize(const AllocatorOptions &options) {
-    allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
+    SetAllocatorMayReturnNull(options.may_return_null);
+    allocator.Init(options.release_to_os_interval_ms);
     SharedInitCode(options);
   }
 
@@ -302,7 +303,7 @@ struct Allocator {
   }
 
   void ReInitialize(const AllocatorOptions &options) {
-    allocator.SetMayReturnNull(options.may_return_null);
+    SetAllocatorMayReturnNull(options.may_return_null);
     allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
     SharedInitCode(options);
 
@@ -323,7 +324,7 @@ struct Allocator {
     options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
-    options->may_return_null = allocator.MayReturnNull();
+    options->may_return_null = AllocatorMayReturnNull();
     options->alloc_dealloc_mismatch =
         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
     options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
@@ -374,7 +375,7 @@ struct Allocator {
     if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
     if (RssLimitExceeded())
-      return allocator.ReturnNullOrDieOnOOM();
+      return AsanAllocator::FailureHandler::OnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -407,7 +408,7 @@ struct Allocator {
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
              (void*)size);
-      return allocator.ReturnNullOrDieOnBadRequest();
+      return AsanAllocator::FailureHandler::OnBadRequest();
     }
 
     AsanThread *t = GetCurrentThread();
@@ -420,8 +421,8 @@ struct Allocator {
       AllocatorCache *cache = &fallback_allocator_cache;
       allocated = allocator.Allocate(cache, needed_size, 8);
     }
-    if (!allocated) return allocator.ReturnNullOrDieOnOOM();
+    if (!allocated)
+      return nullptr;
 
     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
       // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -632,7 +633,7 @@ struct Allocator {
 
   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-      return allocator.ReturnNullOrDieOnBadRequest();
+      return AsanAllocator::FailureHandler::OnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
@@ -38,8 +38,8 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
 static Allocator allocator;
 
 void InitializeAllocator() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.InitLinkerInitialized(
-      common_flags()->allocator_may_return_null,
       common_flags()->allocator_release_to_os_interval_ms);
 }
 
@@ -119,9 +119,8 @@ static AllocatorCache fallback_allocator_cache;
 static SpinMutex fallback_mutex;
 
 void MsanAllocatorInit() {
-  allocator.Init(
-      common_flags()->allocator_may_return_null,
-      common_flags()->allocator_release_to_os_interval_ms);
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
 }
 
 AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -139,7 +138,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return allocator.ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
@@ -197,7 +196,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
 
 void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
   if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-    return allocator.ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
 }
 
@@ -94,8 +94,7 @@ InternalAllocator *internal_allocator() {
     SpinMutexLock l(&internal_alloc_init_mu);
     if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
-      internal_allocator_instance->Init(
-          /* may_return_null */ false, kReleaseToOSIntervalNever);
+      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
       atomic_store(&internal_allocator_initialized, 1, memory_order_release);
     }
   }
@@ -162,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (CallocShouldReturnNullDueToOverflow(count, size))
-    return internal_allocator()->ReturnNullOrDieOnBadRequest();
+    return InternalAllocator::FailureHandler::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
   if (p) internal_memset(p, 0, count * size);
   return p;
@@ -209,12 +208,15 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
   return (max / size) < n;
 }
 
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
 
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+  return atomic_load_relaxed(&allocator_out_of_memory);
+}
 
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
-  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
   Report("%s's allocator is terminating the process instead of returning 0\n",
          SanitizerToolName);
   Report("If you don't like this behavior set allocator_may_return_null=1\n");
@@ -222,4 +224,35 @@ void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
   Die();
 }
 
+bool AllocatorMayReturnNull() {
+  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+  atomic_store(&allocator_may_return_null, may_return_null,
+               memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnBadRequest() {
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  ReportAllocatorCannotReturnNull();
+}
+
 } // namespace __sanitizer
@@ -24,12 +24,28 @@
 
 namespace __sanitizer {
 
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
 
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Allocator failure handling policies:
+// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
+// dies otherwise.
+struct ReturnNullOrDieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+// Always dies on the failure.
+struct DieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
+bool IsAllocatorOutOfMemory();
 
 // Allocators call these callbacks on mmap/munmap.
 struct NoOpMapUnmapCallback {
@@ -24,22 +24,18 @@ template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
+
+  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
     primary_.Init(release_to_os_interval_ms);
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
-  }
-
-  void InitLinkerInitialized(
-      bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.InitLinkerInitialized(may_return_null);
+    secondary_.InitLinkerInitialized();
     stats_.InitLinkerInitialized();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.Init(may_return_null);
+  void Init(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.Init();
     stats_.Init();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
@@ -47,7 +43,7 @@ class CombinedAllocator {
     if (size == 0)
       size = 1;
     if (size + alignment < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -55,44 +51,24 @@ class CombinedAllocator {
     // alignment check.
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
-    void *res;
-    bool from_primary = primary_.CanAllocate(size, alignment);
     // The primary allocator should return a 2^x aligned allocation when
     // requested 2^x bytes, hence using the rounded up 'size' when being
     // serviced by the primary (this is no longer true when the primary is
     // using a non-fixed base address). The secondary takes care of the
     // alignment without such requirement, and allocating 'size' would use
     // extraneous memory, so we employ 'original_size'.
-    if (from_primary)
+    void *res;
+    if (primary_.CanAllocate(size, alignment))
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
+    if (!res)
+      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     return res;
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    secondary_.SetMayReturnNull(may_return_null);
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   s32 ReleaseToOSIntervalMs() const {
     return primary_.ReleaseToOSIntervalMs();
   }
@@ -213,6 +189,5 @@ class CombinedAllocator {
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
-  atomic_uint8_t may_return_null_;
 };
 
@@ -47,7 +47,8 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
     InternalAllocatorCache;
 
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<> > InternalAllocator;
+                          LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+                         > InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);
@@ -17,17 +17,19 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class FailureHandlerT = ReturnNullOrDieOnFailure>
 class LargeMmapAllocator {
  public:
-  void InitLinkerInitialized(bool may_return_null) {
+  typedef FailureHandlerT FailureHandler;
+
+  void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }
 
-  void Init(bool may_return_null) {
+  void Init() {
     internal_memset(this, 0, sizeof(*this));
-    InitLinkerInitialized(may_return_null);
+    InitLinkerInitialized();
   }
 
   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
     map_size += alignment;
     // Overflow.
     if (map_size < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
     return reinterpret_cast<void*>(res);
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -278,7 +262,6 @@ class LargeMmapAllocator {
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
-  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };
 
@@ -495,7 +495,7 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
     VReport(2, "Symbolizer is disabled.\n");
     return;
   }
-  if (IsReportingOOM()) {
+  if (IsAllocatorOutOfMemory()) {
     VReport(2, "Cannot use internal symbolizer: out of memory\n");
   } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
     VReport(2, "Using internal symbolizer.\n");
@@ -426,8 +426,8 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
   void *x = a.Allocate(&stats, 1 << 20, 1);
@@ -463,8 +463,8 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -546,8 +546,9 @@ void TestCombinedAllocator() {
   typedef
       CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
+  SetAllocatorMayReturnNull(true);
   Allocator *a = new Allocator;
-  a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever);
   std::mt19937 r;
 
   AllocatorCache cache;
@@ -561,7 +562,7 @@ void TestCombinedAllocator() {
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
 
   // Set to false
-  a->SetMayReturnNull(false);
+  SetAllocatorMayReturnNull(false);
   EXPECT_DEATH(a->Allocate(&cache, -1, 1),
                "allocator is terminating the process");
 
@@ -873,8 +874,8 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -900,8 +901,8 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -273,6 +273,8 @@ struct ScudoAllocator {
   static const uptr MaxAllowedMallocSize =
       FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
 
+  typedef ReturnNullOrDieOnFailure FailureHandler;
+
   ScudoBackendAllocator BackendAllocator;
   ScudoQuarantine AllocatorQuarantine;
 
@@ -326,7 +328,8 @@ struct ScudoAllocator {
     DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
     DeleteSizeMismatch = Options.DeleteSizeMismatch;
     ZeroContents = Options.ZeroContents;
-    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
+    SetAllocatorMayReturnNull(Options.MayReturnNull);
+    BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
     AllocatorQuarantine.Init(
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
@@ -354,11 +357,11 @@ struct ScudoAllocator {
       dieWithMessage("ERROR: alignment is not a power of 2\n");
     }
     if (Alignment > MaxAlignment)
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     if (Alignment < MinAlignment)
       Alignment = MinAlignment;
     if (Size >= MaxAllowedMallocSize)
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     if (Size == 0)
       Size = 1;
 
@@ -366,7 +369,7 @@ struct ScudoAllocator {
     uptr AlignedSize = (Alignment > MinAlignment) ?
         NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
     if (AlignedSize >= MaxAllowedMallocSize)
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
 
     // Primary and Secondary backed allocations have a different treatment. We
     // deal with alignment requirements of Primary serviced allocations here,
@@ -391,7 +394,7 @@ struct ScudoAllocator {
                                       AllocationAlignment, FromPrimary);
     }
     if (!Ptr)
-      return BackendAllocator.ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
 
     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && FromPrimary)
@@ -583,7 +586,7 @@ struct ScudoAllocator {
     initThreadMaybe();
     uptr Total = NMemB * Size;
     if (Size != 0 && Total / Size != NMemB) // Overflow check
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     return allocate(Total, MinAlignment, FromMalloc, true);
   }
 
@@ -23,11 +23,10 @@ template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>
 class ScudoCombinedAllocator {
  public:
-  void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
+  void Init(s32 ReleaseToOSIntervalMs) {
     Primary.Init(ReleaseToOSIntervalMs);
-    Secondary.Init(AllocatorMayReturnNull);
+    Secondary.Init();
     Stats.Init();
-    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
   }
 
   void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
@@ -37,18 +36,6 @@ class ScudoCombinedAllocator {
     return Secondary.Allocate(&Stats, Size, Alignment);
   }
 
-  void *ReturnNullOrDieOnBadRequest() {
-    if (atomic_load_relaxed(&MayReturnNull))
-      return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (atomic_load_relaxed(&MayReturnNull))
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
   void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
     if (FromPrimary)
       Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
@@ -78,7 +65,6 @@ class ScudoCombinedAllocator {
   PrimaryAllocator Primary;
   SecondaryAllocator Secondary;
   AllocatorGlobalStats Stats;
-  atomic_uint8_t MayReturnNull;
 };
 
 #endif // SCUDO_ALLOCATOR_COMBINED_H_
@@ -24,9 +24,8 @@
 class ScudoLargeMmapAllocator {
  public:
 
-  void Init(bool AllocatorMayReturnNull) {
+  void Init() {
     PageSize = GetPageSizeCached();
-    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
   }
 
   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
@@ -42,7 +41,7 @@ class ScudoLargeMmapAllocator {
 
     uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
     if (MapBeg == ~static_cast<uptr>(0))
-      return ReturnNullOrDieOnOOM();
+      return ReturnNullOrDieOnFailure::OnOOM();
     // A page-aligned pointer is assumed after that, so check it now.
     CHECK(IsAligned(MapBeg, PageSize));
     uptr MapEnd = MapBeg + MapSize;
@@ -96,12 +95,6 @@ class ScudoLargeMmapAllocator {
     return reinterpret_cast<void *>(Ptr);
   }
 
-  void *ReturnNullOrDieOnOOM() {
-    if (atomic_load_relaxed(&MayReturnNull))
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
   void Deallocate(AllocatorStats *Stats, void *Ptr) {
     SecondaryHeader *Header = getHeader(Ptr);
     {
@@ -140,7 +133,6 @@ class ScudoLargeMmapAllocator {
   const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
   uptr PageSize;
   SpinMutex StatsMutex;
-  atomic_uint8_t MayReturnNull;
 };
 
 #endif // SCUDO_ALLOCATOR_SECONDARY_H_
@@ -112,9 +112,8 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
 }
 
 void InitializeAllocator() {
-  allocator()->Init(
-      common_flags()->allocator_may_return_null,
-      common_flags()->allocator_release_to_os_interval_ms);
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
 }
 
 void InitializeAllocatorLate() {
@@ -151,7 +150,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
 
 void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return allocator()->ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
   if (p == 0)
     return 0;
@@ -164,7 +163,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
 
 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
   if (CallocShouldReturnNullDueToOverflow(size, n))
-    return allocator()->ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   void *p = user_alloc(thr, pc, n * size);
   if (p)
     internal_memset(p, 0, n * size);