Return memory to OS right after free (not in the async thread).

Summary:
To avoid starting a separate thread to return unused memory to the
system (the thread interferes with process startup on Android: Zygote
waits for all threads to exit before fork(), but this thread never
exits), try to return the memory right after free.

Reviewers: eugenis

Subscribers: cryptoad, filcab, danalbert, kubabrecka, llvm-commits

Patch by Aleksey Shlyapnikov.

Differential Revision: https://reviews.llvm.org/D27003

llvm-svn: 288091
Committed by Evgeniy Stepanov on 2016-11-29 00:22:50 +00:00.
Commit d3305afc75 (parent b9e53c9056); 19 changed files with 129 additions and 88 deletions.
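
The approach described in the summary can be sketched roughly as follows (illustrative only, not the patch's code; the names MaybeReleaseRange and NowNs, the globals, and the direct madvise call are assumptions): the deallocation path itself rate-limits releases instead of relying on a background thread.

#include <stdint.h>
#include <time.h>
#include <sys/mman.h>

// -1 mirrors kReleaseToOSIntervalNever: never release memory to the OS.
static int64_t g_release_interval_ms = -1;
static uint64_t g_last_release_ns = 0;

static uint64_t NowNs() {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

// Hypothetical hook called from the free path: return the page range
// [beg, end) to the OS, but not more often than the configured interval.
static void MaybeReleaseRange(char *beg, char *end) {
  if (g_release_interval_ms < 0) return;   // releasing is disabled
  uint64_t now = NowNs();
  if (g_last_release_ns + (uint64_t)g_release_interval_ms * 1000000ULL > now)
    return;                                // memory was returned recently
  g_last_release_ns = now;
  madvise(beg, end - beg, MADV_DONTNEED);  // pages become reclaimable by the OS
}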

@ -79,11 +79,13 @@ static struct AsanDeactivatedFlags {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
"allocator_may_return_null %d, coverage %d, coverage_dir %s, "
"allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir);
allocator_options.may_return_null, coverage, coverage_dir,
allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;

@ -33,3 +33,4 @@ COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)

@ -211,6 +211,7 @@ void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
@ -219,6 +220,7 @@ void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
@ -262,7 +264,7 @@ struct Allocator {
}
void Initialize(const AllocatorOptions &options) {
allocator.Init(options.may_return_null);
allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SharedInitCode(options);
}
@ -291,6 +293,7 @@ struct Allocator {
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
// Poison all existing allocation's redzones.
@ -312,6 +315,7 @@ struct Allocator {
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
@ -687,8 +691,6 @@ struct Allocator {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
void ReleaseToOS() { allocator.ReleaseToOS(); }
};
static Allocator instance(LINKER_INITIALIZED);
@ -730,11 +732,8 @@ StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(GetFreeStackId());
}
void ReleaseToOS() { instance.ReleaseToOS(); }
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
SetAllocatorReleaseToOSCallback(ReleaseToOS);
}
void ReInitializeAllocator(const AllocatorOptions &options) {

@ -37,6 +37,7 @@ struct AllocatorOptions {
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);

@ -64,7 +64,9 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {

@ -103,7 +103,9 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void MsanAllocatorInit() {
allocator.Init(common_flags()->allocator_may_return_null);
allocator.Init(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {

@ -94,7 +94,8 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
internal_allocator_instance->Init(/* may_return_null*/ false);
internal_allocator_instance->Init(
/* may_return_null */ false, kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}

@ -24,21 +24,22 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
void InitCommon(bool may_return_null) {
primary_.Init();
void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms);
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void InitLinkerInitialized(bool may_return_null) {
void InitLinkerInitialized(
bool may_return_null, s32 release_to_os_interval_ms) {
secondary_.InitLinkerInitialized(may_return_null);
stats_.InitLinkerInitialized();
InitCommon(may_return_null);
InitCommon(may_return_null, release_to_os_interval_ms);
}
void Init(bool may_return_null) {
void Init(bool may_return_null, s32 release_to_os_interval_ms) {
secondary_.Init(may_return_null);
stats_.Init();
InitCommon(may_return_null);
InitCommon(may_return_null, release_to_os_interval_ms);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
@ -83,6 +84,14 @@ class CombinedAllocator {
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}
s32 ReleaseToOSIntervalMs() const {
return primary_.ReleaseToOSIntervalMs();
}
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
bool RssLimitIsExceeded() {
return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
}
@ -193,8 +202,6 @@ class CombinedAllocator {
primary_.ForceUnlock();
}
void ReleaseToOS() { primary_.ReleaseToOS(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {

@ -90,11 +90,19 @@ class SizeClassAllocator32 {
SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
void Init() {
void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
s32 ReleaseToOSIntervalMs() const {
return kReleaseToOSIntervalNever;
}
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
// This is empty here. Currently only implemented in 64-bit allocator.
}
void *MapWithCallback(uptr size) {
size = RoundUpTo(size, GetPageSizeCached());
void *res = MmapOrDie(size, "SizeClassAllocator32");
@ -229,10 +237,6 @@ class SizeClassAllocator32 {
return 0;
}
// This is empty here. Currently only implemented in 64-bit allocator.
void ReleaseToOS() { }
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;

@ -69,7 +69,7 @@ class SizeClassAllocator64 {
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}
void Init() {
void Init(s32 release_to_os_interval_ms) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
if (kUsingConstantSpaceBeg) {
CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
@ -79,9 +79,19 @@ class SizeClassAllocator64 {
reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
}
SetReleaseToOSIntervalMs(release_to_os_interval_ms);
MapWithCallback(SpaceEnd(), AdditionalSize());
}
s32 ReleaseToOSIntervalMs() const {
return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
}
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
memory_order_relaxed);
}
void MapWithCallback(uptr beg, uptr size) {
CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
MapUnmapCallback().OnMap(beg, size);
@ -111,6 +121,8 @@ class SizeClassAllocator64 {
free_array[old_num_chunks + i] = chunks[i];
region->num_freed_chunks = new_num_freed_chunks;
region->n_freed += n_chunks;
MaybeReleaseToOS(class_id);
}
NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
@ -284,11 +296,6 @@ class SizeClassAllocator64 {
GetPageSizeCached());
}
void ReleaseToOS() {
for (uptr class_id = 1; class_id < kNumClasses; class_id++)
ReleaseToOS(class_id);
}
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@ -317,12 +324,13 @@ class SizeClassAllocator64 {
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
static const uptr kFreeArrayMapSize = 1 << 16;
// Granularity of ReleaseToOs (aka madvise).
static const uptr kReleaseToOsGranularity = 1 << 12;
atomic_sint32_t release_to_os_interval_ms_;
struct ReleaseToOsInfo {
uptr n_freed_at_last_release;
uptr num_releases;
u64 last_release_at_ns;
};
struct RegionInfo {
@ -454,50 +462,63 @@ class SizeClassAllocator64 {
CompactPtrT first, CompactPtrT last) {
uptr beg_ptr = CompactPtrToPointer(region_beg, first);
uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity);
beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity);
end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity);
const uptr page_size = GetPageSizeCached();
CHECK_GE(end_ptr - beg_ptr, page_size);
beg_ptr = RoundUpTo(beg_ptr, page_size);
end_ptr = RoundDownTo(end_ptr, page_size);
if (end_ptr == beg_ptr) return false;
ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr);
return true;
}
// Releases some RAM back to OS.
// Attempts to release some RAM back to OS. The region is expected to be
// locked.
// Algorithm:
// * Lock the region.
// * Sort the chunks.
// * Find ranges fully covered by free-d chunks
// * Release them to OS with madvise.
//
// TODO(kcc): make sure we don't do it too frequently.
void ReleaseToOS(uptr class_id) {
void MaybeReleaseToOS(uptr class_id) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
const uptr page_size = GetPageSizeCached();
uptr n = region->num_freed_chunks;
if (n * chunk_size < page_size)
return; // No chance to release anything.
if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
page_size) {
return; // Nothing new to release.
}
s32 interval_ms = ReleaseToOSIntervalMs();
if (interval_ms < 0)
return;
u64 now_ns = NanoTime();
if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
return; // Memory was returned recently.
region->rtoi.last_release_at_ns = now_ns;
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
uptr chunk_size = ClassIdToSize(class_id);
uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
BlockingMutexLock l(&region->mutex);
uptr n = region->num_freed_chunks;
if (n * chunk_size < kReleaseToOsGranularity)
return; // No chance to release anything.
if ((region->rtoi.n_freed_at_last_release - region->n_freed) * chunk_size <
kReleaseToOsGranularity)
return; // Nothing new to release.
SortArray(free_array, n);
uptr beg = free_array[0];
const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
const uptr kScaledGranularity = page_size >> kCompactPtrScale;
uptr range_beg = free_array[0];
uptr prev = free_array[0];
for (uptr i = 1; i < n; i++) {
uptr chunk = free_array[i];
CHECK_GT(chunk, prev);
if (chunk - prev != scaled_chunk_size) {
CHECK_GT(chunk - prev, scaled_chunk_size);
if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
region->rtoi.n_freed_at_last_release = region->n_freed;
region->rtoi.num_releases++;
}
beg = chunk;
range_beg = chunk;
}
prev = chunk;
}
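
For reference, the range-coalescing step inside MaybeReleaseToOS above can be modeled in isolation roughly like this (a simplified sketch: plain pointers instead of compact pointers, a caller-supplied release callback instead of madvise, and it also flushes the trailing run for completeness).

#include <algorithm>
#include <cstdint>
#include <vector>

// Given addresses of equally sized free chunks, find runs of adjacent
// chunks spanning at least one page and pass each run to release_fn,
// which is expected to page-align and release the range.
template <class ReleaseFn>
void CoalesceAndRelease(std::vector<uintptr_t> chunks, uintptr_t chunk_size,
                        uintptr_t page_size, ReleaseFn release_fn) {
  if (chunks.empty()) return;
  std::sort(chunks.begin(), chunks.end());
  uintptr_t range_beg = chunks[0];
  uintptr_t prev = chunks[0];
  for (size_t i = 1; i < chunks.size(); i++) {
    uintptr_t chunk = chunks[i];
    if (chunk - prev != chunk_size) {  // the current run ends at prev
      if (prev + chunk_size - range_beg >= page_size)
        release_fn(range_beg, prev + chunk_size);
      range_beg = chunk;               // start a new run
    }
    prev = chunk;
  }
  if (prev + chunk_size - range_beg >= page_size)  // trailing run
    release_fn(range_beg, prev + chunk_size);
}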

@ -37,6 +37,11 @@ struct atomic_uint16_t {
volatile Type val_dont_use;
};
struct atomic_sint32_t {
typedef s32 Type;
volatile Type val_dont_use;
};
struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;

@ -375,12 +375,6 @@ void SetCheckFailedCallback(CheckFailedCallbackType callback);
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
// Callback to be called when we want to try releasing unused allocator memory
// back to the OS.
typedef void (*AllocatorReleaseToOSCallback)();
// The callback should be registered once at the tool init time.
void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsHandledDeadlySignal(int signum);
@ -843,6 +837,10 @@ struct StackDepotStats {
uptr allocated;
};
// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,

@ -70,18 +70,11 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
SoftRssLimitExceededCallback = Callback;
}
static AllocatorReleaseToOSCallback ReleseCallback;
void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) {
CHECK_EQ(ReleseCallback, nullptr);
ReleseCallback = Callback;
}
#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
bool heap_profile = common_flags()->heap_profile;
bool allocator_release_to_os = common_flags()->allocator_release_to_os;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
@ -127,7 +120,6 @@ void BackgroundThread(void *arg) {
SoftRssLimitExceededCallback(false);
}
}
if (allocator_release_to_os && ReleseCallback) ReleseCallback();
if (heap_profile &&
current_rss_mb > rss_during_last_reported_profile * 1.1) {
Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
@ -162,7 +154,6 @@ void MaybeStartBackgroudThread() {
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
!common_flags()->allocator_release_to_os &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);

@ -119,9 +119,11 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
" This limit does not affect memory allocations other than"
" malloc/new.")
COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
COMMON_FLAG(bool, allocator_release_to_os, false,
"Experimental. If true, try to periodically release unused"
" memory to the OS.\n")
COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
"Experimental. Only affects a 64-bit allocator. If set, tries to "
"release unused memory to the OS, but not more often than this "
"interval (in milliseconds). Negative values mean do not attempt "
"to release memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")

@ -141,7 +141,7 @@ TEST(SanitizerCommon, InternalSizeClassMap) {
template <class Allocator>
void TestSizeClassAllocator() {
Allocator *a = new Allocator;
a->Init();
a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@ -238,7 +238,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Compact) {
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
Allocator *a = new Allocator;
a->Init();
a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@ -292,7 +292,7 @@ TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
Allocator *a = new Allocator;
a->Init();
a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@ -366,7 +366,7 @@ TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
TestMapUnmapCallback::unmap_count = 0;
typedef SizeClassAllocator64<AP64WithCallback> Allocator64WithCallBack;
Allocator64WithCallBack *a = new Allocator64WithCallBack;
a->Init();
a->Init(kReleaseToOSIntervalNever);
EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
memset(&cache, 0, sizeof(cache));
@ -397,7 +397,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback>
Allocator32WithCallBack;
Allocator32WithCallBack *a = new Allocator32WithCallBack;
a->Init();
a->Init(kReleaseToOSIntervalNever);
EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
memset(&cache, 0, sizeof(cache));
@ -430,7 +430,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
template<class Allocator>
void FailInAssertionOnOOM() {
Allocator a;
a.Init();
a.Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@ -538,7 +538,7 @@ void TestCombinedAllocator() {
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
Allocator *a = new Allocator;
a->Init(/* may_return_null */ true);
a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
@ -627,7 +627,7 @@ void TestSizeClassAllocatorLocalCache() {
typedef typename AllocatorCache::Allocator Allocator;
Allocator *a = new Allocator();
a->Init();
a->Init(kReleaseToOSIntervalNever);
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@ -702,7 +702,7 @@ void *AllocatorLeakTestWorker(void *arg) {
TEST(SanitizerCommon, AllocatorLeakTest) {
typedef AllocatorCache::Allocator Allocator;
Allocator a;
a.Init();
a.Init(kReleaseToOSIntervalNever);
uptr total_used_memory = 0;
for (int i = 0; i < 100; i++) {
pthread_t t;
@ -735,7 +735,7 @@ static void *DeallocNewThreadWorker(void *arg) {
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
AllocatorCache::Allocator allocator;
allocator.Init();
allocator.Init(kReleaseToOSIntervalNever);
AllocatorCache main_cache;
AllocatorCache child_cache;
memset(&main_cache, 0, sizeof(main_cache));
@ -806,7 +806,7 @@ void IterationTestCallback(uptr chunk, void *arg) {
template <class Allocator>
void TestSizeClassAllocatorIteration() {
Allocator *a = new Allocator;
a->Init();
a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@ -947,7 +947,7 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
const uptr kRegionSize =
kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
SpecialAllocator64 *a = new SpecialAllocator64;
a->Init();
a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);

@ -212,6 +212,7 @@ static thread_local QuarantineCache ThreadQuarantineCache;
void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
MayReturnNull = cf->allocator_may_return_null;
ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
QuarantineSizeMb = f->QuarantineSizeMb;
ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
DeallocationTypeMismatch = f->DeallocationTypeMismatch;
@ -221,6 +222,7 @@ void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
cf->allocator_may_return_null = MayReturnNull;
cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
f->QuarantineSizeMb = QuarantineSizeMb;
f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
f->DeallocationTypeMismatch = DeallocationTypeMismatch;
@ -276,7 +278,7 @@ struct Allocator {
DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
DeleteSizeMismatch = Options.DeleteSizeMismatch;
ZeroContents = Options.ZeroContents;
BackendAllocator.Init(Options.MayReturnNull);
BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
AllocatorQuarantine.Init(
static_cast<uptr>(Options.QuarantineSizeMb) << 20,
static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);

@ -94,6 +94,7 @@ struct AllocatorOptions {
u32 QuarantineSizeMb;
u32 ThreadLocalQuarantineSizeKb;
bool MayReturnNull;
s32 ReleaseToOSIntervalMs;
bool DeallocationTypeMismatch;
bool DeleteSizeMismatch;
bool ZeroContents;

@ -111,7 +111,9 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}
void InitializeAllocator() {
allocator()->Init(common_flags()->allocator_may_return_null);
allocator()->Init(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {

@ -2,8 +2,8 @@
//
// RUN: %clangxx_asan -std=c++11 %s -o %t
// RUN: %env_asan_opts=allocator_release_to_os=1 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
// RUN: %env_asan_opts=allocator_release_to_os=0 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=0 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
//
// REQUIRES: x86_64-target-arch
#include <stdlib.h>