[scudo][standalone] Allow setting release to OS
Summary:
Add a method to set the release to OS value as the system runs, and allow this
to be set differently in the primary and the secondary. Also, add a default
value to use for the primary and the secondary. This allows Android to have a
default that is different for primary/secondary. Update mallopt to support
setting the release to OS value.

Reviewers: pcc, cryptoad

Reviewed By: cryptoad

Subscribers: cryptoad, jfb, #sanitizers, llvm-commits

Tags: #sanitizers, #llvm

Differential Revision: https://reviews.llvm.org/D74448
commit 5f91c7b980
parent b75692c30e
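Before the diff, a rough usage sketch (not part of the patch) of what this change enables through Bionic's mallopt() wrapper on Android. The helper function names are illustrative; only mallopt(), M_DECAY_TIME and M_PURGE are real Bionic interfaces.

// Illustrative only: how a process could use the new knob once this lands.
// Requires Bionic's <malloc.h> for mallopt(), M_DECAY_TIME and M_PURGE.
#include <malloc.h>

static void onAppBackgrounded() {
  // 0 maps to the minimum configured release interval (release promptly);
  // any other value maps to the maximum (hold memory longer). See the
  // wrappers_c.inc hunk at the end of the diff.
  mallopt(M_DECAY_TIME, 0);
  // Force an immediate attempt to return cached memory to the OS.
  mallopt(M_PURGE, 0);
}

static void onAppForegrounded() {
  mallopt(M_DECAY_TIME, 1);
}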
compiler-rt/lib/scudo/standalone/allocator_config.h

@@ -40,15 +40,15 @@ struct AndroidConfig {
   using SizeClassMap = AndroidSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 256MB regions
-  typedef SizeClassAllocator64<SizeClassMap, 28U,
+  typedef SizeClassAllocator64<SizeClassMap, 28U, 1000, 1000,
                                /*MaySupportMemoryTagging=*/true>
       Primary;
 #else
   // 256KB regions
-  typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+  typedef SizeClassAllocator32<SizeClassMap, 18U, 1000, 1000> Primary;
 #endif
   // Cache blocks up to 2MB
-  typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20>> Secondary;
+  typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20, 0, 1000>> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
 };

@@ -57,12 +57,12 @@ struct AndroidSvelteConfig {
   using SizeClassMap = SvelteSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 128MB regions
-  typedef SizeClassAllocator64<SizeClassMap, 27U> Primary;
+  typedef SizeClassAllocator64<SizeClassMap, 27U, 1000, 1000> Primary;
 #else
   // 64KB regions
-  typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
+  typedef SizeClassAllocator32<SizeClassMap, 16U, 1000, 1000> Primary;
 #endif
-  typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18>> Secondary;
+  typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18, 0, 0>> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
 };
compiler-rt/lib/scudo/standalone/combined.h

@@ -32,6 +32,8 @@ extern "C" inline void EmptyCallback() {}

 namespace scudo {

+enum class Option { ReleaseInterval };
+
 template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
 class Allocator {
 public:

@@ -624,8 +626,14 @@ public:
     return Options.MayReturnNull;
   }

-  // TODO(kostyak): implement this as a "backend" to mallopt.
-  bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+  bool setOption(Option O, sptr Value) {
+    if (O == Option::ReleaseInterval) {
+      Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+      Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+      return true;
+    }
+    return false;
+  }

   // Return the usable size for a given chunk. Technically we lie, as we just
   // report the actual size of a chunk. This is done to counteract code actively
compiler-rt/lib/scudo/standalone/flags.inc

@@ -45,6 +45,6 @@ SCUDO_FLAG(bool, may_return_null, true,
            "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
            "invalid allocation alignments, etc.")

-SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? 1000 : 5000,
+SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
            "Interval (in milliseconds) at which to attempt release of unused "
            "memory to the OS. Negative values disable the feature.")
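Note how this default interacts with the new Min/Max template parameters: the value stored at init time goes through setReleaseToOsIntervalMs(), which clamps it to the per-allocator bounds. Below is a standalone restatement of that clamp (not the scudo source), exercised with the bounds from the AndroidConfig hunk above.

// Standalone restatement of the clamp applied by setReleaseToOsIntervalMs(),
// using the AndroidConfig bounds (Min = Max = 1000 for the primary,
// 0/1000 for the secondary cache).
#include <cassert>
#include <climits>
#include <cstdint>

static int32_t clampInterval(int32_t Interval, int32_t Min, int32_t Max) {
  if (Interval >= Max)
    return Max;
  if (Interval <= Min)
    return Min;
  return Interval;
}

int main() {
  // The Android default flag value INT32_MIN still yields a 1000 ms interval
  // for the primary, and 0 ms (release as soon as possible) for the secondary.
  assert(clampInterval(INT32_MIN, 1000, 1000) == 1000);
  assert(clampInterval(INT32_MIN, 0, 1000) == 0);
  // With the default INT32_MIN/INT32_MAX bounds, values pass through as-is.
  assert(clampInterval(5000, INT32_MIN, INT32_MAX) == 5000);
  return 0;
}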
compiler-rt/lib/scudo/standalone/primary32.h

@@ -38,14 +38,18 @@ namespace scudo {
 // Memory used by this allocator is never unmapped but can be partially
 // reclaimed if the platform allows for it.

-template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+template <class SizeClassMapT, uptr RegionSizeLog,
+          s32 MinReleaseToOsIntervalMs = INT32_MIN,
+          s32 MaxReleaseToOsIntervalMs = INT32_MAX> class SizeClassAllocator32 {
 public:
   typedef SizeClassMapT SizeClassMap;
   // The bytemap can only track UINT8_MAX - 1 classes.
   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
   // Regions should be large enough to hold the largest Block.
   static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
-  typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+  typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog,
+                               MinReleaseToOsIntervalMs,
+                               MaxReleaseToOsIntervalMs> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef typename CacheT::TransferBatch TransferBatch;
   static const bool SupportsMemoryTagging = false;

@@ -78,7 +82,7 @@ public:
       Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
                         (getSizeByClassId(I) >= (PageSize / 32));
     }
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));

@@ -176,6 +180,15 @@ public:
       getStats(Str, I, 0);
   }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    if (Interval >= MaxReleaseToOsIntervalMs) {
+      Interval = MaxReleaseToOsIntervalMs;
+    } else if (Interval <= MinReleaseToOsIntervalMs) {
+      Interval = MinReleaseToOsIntervalMs;
+    }
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+  }
+
   uptr releaseToOS() {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {

@@ -356,6 +369,10 @@ private:
                 AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
   }

+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+  }
+
   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                  bool Force = false) {
     const uptr BlockSize = getSizeByClassId(ClassId);

@@ -374,7 +391,7 @@ private:
     }

     if (!Force) {
-      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      const s32 IntervalMs = getReleaseToOsIntervalMs();
       if (IntervalMs < 0)
         return 0;
       if (Sci->ReleaseInfo.LastReleaseAtNs +

@@ -414,7 +431,7 @@ private:
   // through the whole NumRegions.
   uptr MinRegionIndex;
   uptr MaxRegionIndex;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
compiler-rt/lib/scudo/standalone/primary64.h

@@ -40,11 +40,15 @@ namespace scudo {
 // released if the platform allows for it.

 template <class SizeClassMapT, uptr RegionSizeLog,
+          s32 MinReleaseToOsIntervalMs = INT32_MIN,
+          s32 MaxReleaseToOsIntervalMs = INT32_MAX,
           bool MaySupportMemoryTagging = false>
 class SizeClassAllocator64 {
 public:
   typedef SizeClassMapT SizeClassMap;
   typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog,
+                               MinReleaseToOsIntervalMs,
+                               MaxReleaseToOsIntervalMs,
                                MaySupportMemoryTagging>
       ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;

@@ -90,7 +94,7 @@ public:
                         (getSizeByClassId(I) >= (PageSize / 32));
       Region->RandState = getRandomU32(&Seed);
     }
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);

     if (SupportsMemoryTagging)
       UseMemoryTagging = systemSupportsMemoryTagging();

@@ -186,6 +190,15 @@ public:
       getStats(Str, I, 0);
   }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    if (Interval >= MaxReleaseToOsIntervalMs) {
+      Interval = MaxReleaseToOsIntervalMs;
+    } else if (Interval <= MinReleaseToOsIntervalMs) {
+      Interval = MinReleaseToOsIntervalMs;
+    }
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+  }
+
   uptr releaseToOS() {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {

@@ -241,7 +254,7 @@ private:
   uptr PrimaryBase;
   RegionInfo *RegionInfoArray;
   MapPlatformData Data;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
   bool UseMemoryTagging;

   RegionInfo *getRegionInfo(uptr ClassId) const {

@@ -375,6 +388,10 @@ private:
                             getRegionBaseByClassId(ClassId));
   }

+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+  }
+
   NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                  bool Force = false) {
     const uptr BlockSize = getSizeByClassId(ClassId);

@@ -394,7 +411,7 @@ private:
     }

     if (!Force) {
-      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      const s32 IntervalMs = getReleaseToOsIntervalMs();
       if (IntervalMs < 0)
         return 0;
       if (Region->ReleaseInfo.LastReleaseAtNs +
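In both primaries (and in the secondary cache below) the interval field changes from a plain s32 to atomic_s32 because it can now be updated at runtime via mallopt()/setOption() while allocator threads concurrently read it in releaseToOSMaybe(). The sketch below restates the pattern with the standard library rather than scudo's atomic helpers; it is an illustration, not scudo code.

// Standalone illustration (std::atomic instead of scudo's atomic_s32 helpers)
// of the new access pattern: one thread may store a new interval while others
// load it, so the accesses must be atomic. Relaxed ordering is sufficient
// because the interval is an independent tuning value and does not order any
// other memory accesses.
#include <atomic>
#include <cstdint>

static std::atomic<int32_t> ReleaseToOsIntervalMs{5000};

void setReleaseToOsIntervalMs(int32_t Interval) {
  ReleaseToOsIntervalMs.store(Interval, std::memory_order_relaxed);
}

int32_t getReleaseToOsIntervalMs() {
  return ReleaseToOsIntervalMs.load(std::memory_order_relaxed);
}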
compiler-rt/lib/scudo/standalone/secondary.h

@@ -62,7 +62,9 @@ public:
   void releaseToOS() {}
 };

-template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19>
+template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19,
+          s32 MinReleaseToOsIntervalMs = INT32_MIN,
+          s32 MaxReleaseToOsIntervalMs = INT32_MAX>
 class MapAllocatorCache {
 public:
   // Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length

@@ -71,7 +73,7 @@ public:
   static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, "");

   void initLinkerInitialized(s32 ReleaseToOsInterval) {
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));

@@ -105,11 +107,11 @@ public:
         }
       }
     }
+    s32 Interval;
     if (EmptyCache)
       empty();
-    else if (ReleaseToOsIntervalMs >= 0)
-      releaseOlderThan(Time -
-                       static_cast<u64>(ReleaseToOsIntervalMs) * 1000000);
+    else if ((Interval = getReleaseToOsIntervalMs()) >= 0)
+      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     return EntryCached;
   }

@@ -142,6 +144,15 @@ public:
     return MaxEntriesCount != 0U && Size <= MaxEntrySize;
   }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    if (Interval >= MaxReleaseToOsIntervalMs) {
+      Interval = MaxReleaseToOsIntervalMs;
+    } else if (Interval <= MinReleaseToOsIntervalMs) {
+      Interval = MinReleaseToOsIntervalMs;
+    }
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+  }
+
   void releaseToOS() { releaseOlderThan(UINT64_MAX); }

   void disable() { Mutex.lock(); }

@@ -189,6 +200,10 @@ private:
     }
   }

+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+  }
+
   struct CachedBlock {
     uptr Block;
     uptr BlockEnd;

@@ -203,7 +218,7 @@ private:
   u32 EntriesCount;
   uptr LargestSize;
   u32 IsFullEvents;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
 };

 template <class CacheT> class MapAllocator {

@@ -251,6 +266,10 @@ public:

   static uptr canCache(uptr Size) { return CacheT::canCache(Size); }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    Cache.setReleaseToOsIntervalMs(Interval);
+  }
+
   void releaseToOS() { Cache.releaseToOS(); }

 private:
compiler-rt/lib/scudo/standalone/wrappers_c.inc

@@ -157,7 +157,18 @@ void SCUDO_PREFIX(malloc_postinit)() {

 INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
   if (param == M_DECAY_TIME) {
-    // TODO(kostyak): set release_to_os_interval_ms accordingly.
+    if (SCUDO_ANDROID) {
+      if (value == 0) {
+        // Will set the release values to their minimum values.
+        value = INT32_MIN;
+      } else {
+        // Will set the release values to their maximum values.
+        value = INT32_MAX;
+      }
+    }
+
+    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
+                              static_cast<scudo::sptr>(value));
     return 1;
   } else if (param == M_PURGE) {
     SCUDO_ALLOCATOR.releaseToOS();