[sanitizer] Support dynamic premapped R/W range in primary allocator.

The main use case for this change is HWASan aliasing mode, which premaps
the alias space adjacent to the dynamic shadow.  With this change, the
primary allocator can allocate from the alias space instead of a
separate region.
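
For illustration only, here is a rough usage sketch of the new interface, modeled on the ScopedPremappedHeap helper added to the unit tests below. The Allocator64Dynamic typedef, kAllocatorSize constant, and kReleaseToOSIntervalNever come from the test file; CreatePremappedAllocator is a hypothetical name, not code from this patch or from HWASan:

    // Sketch: premap a large R/W range up front, then let the dynamic primary
    // allocator place its heap inside it instead of reserving its own region.
    Allocator64Dynamic *CreatePremappedAllocator() {
      // Over-map by 2x so a kAllocatorSize-aligned start is guaranteed to exist.
      uptr base = reinterpret_cast<uptr>(
          MmapNoReserveOrDie(2 * kAllocatorSize, "premapped heap"));
      uptr heap_start = RoundUpTo(base, kAllocatorSize);
      Allocator64Dynamic *a = new Allocator64Dynamic;
      // A nonzero heap_start switches the primary into premapped mode.
      a->Init(kReleaseToOSIntervalNever, heap_start);
      return a;
    }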

Reviewed By: vitalybuka, eugenis

Differential Revision: https://reviews.llvm.org/D98293
Matt Morehouse 2021-03-23 09:31:19 -07:00
parent fd142e6c18
commit 642b80013c
4 changed files with 125 additions and 30 deletions


@@ -35,9 +35,9 @@ class CombinedAllocator {
     secondary_.InitLinkerInitialized();
   }
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     stats_.Init();
-    primary_.Init(release_to_os_interval_ms);
+    primary_.Init(release_to_os_interval_ms, heap_start);
     secondary_.Init();
   }


@@ -119,7 +119,8 @@ class SizeClassAllocator32 {
   typedef SizeClassAllocator32<Params> ThisT;
   typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
+    CHECK(!heap_start);
     possible_regions.Init();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
   }
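
A note on the hunk above: the 32-bit primary gains the extra parameter only to keep the CombinedAllocator interface uniform; premapped mode is implemented solely for SizeClassAllocator64, and any nonzero heap_start trips the CHECK. A minimal sketch, assuming the AP32 params struct used by the allocator tests:

    SizeClassAllocator32<AP32> *a32 = new SizeClassAllocator32<AP32>;
    a32->Init(kReleaseToOSIntervalNever);                // OK: heap_start defaults to 0
    // a32->Init(kReleaseToOSIntervalNever, heap_start); // would abort on CHECK(!heap_start)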


@@ -69,25 +69,45 @@ class SizeClassAllocator64 {
     return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
   }
-  void Init(s32 release_to_os_interval_ms) {
+  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
+  // at heap_start and places the heap there.  This mode requires kSpaceBeg ==
+  // ~(uptr)0.
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
-    if (kUsingConstantSpaceBeg) {
-      CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
-      CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
-                                             PrimaryAllocatorName, kSpaceBeg));
+    PremappedHeap = heap_start != 0;
+    if (PremappedHeap) {
+      CHECK(!kUsingConstantSpaceBeg);
+      NonConstSpaceBeg = heap_start;
+      uptr RegionInfoSize = AdditionalSize();
+      RegionInfoSpace =
+          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
+      CHECK_NE(RegionInfoSpace, ~(uptr)0);
+      CHECK_EQ(RegionInfoSpace,
+               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
+                                      "SizeClassAllocator: region info"));
+      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
     } else {
-      // Combined allocator expects that an 2^N allocation is always aligned to
-      // 2^N. For this to work, the start of the space needs to be aligned as
-      // high as the largest size class (which also needs to be a power of 2).
-      NonConstSpaceBeg = address_range.InitAligned(
-          TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
-      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      if (kUsingConstantSpaceBeg) {
+        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+        CHECK_EQ(kSpaceBeg,
+                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
+                                    kSpaceBeg));
+      } else {
+        // Combined allocator expects that an 2^N allocation is always aligned
+        // to 2^N. For this to work, the start of the space needs to be aligned
+        // as high as the largest size class (which also needs to be a power of
+        // 2).
+        NonConstSpaceBeg = address_range.InitAligned(
+            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      }
+      RegionInfoSpace = SpaceEnd();
+      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
+                           "SizeClassAllocator: region info");
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
-    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
-                         "SizeClassAllocator: region info");
     // Check that the RegionInfo array is aligned on the CacheLine size.
-    DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
   }
   s32 ReleaseToOSIntervalMs() const {
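
As a hedged reading of the Init() paths above: in premapped mode the heap span is caller-owned and the RegionInfo metadata gets its own small mapping, while in the default mode the metadata sits directly after the space (RegionInfoSpace == SpaceEnd()). The helper below is purely illustrative (not part of the patch) and just spells out the invariants the premapped branch establishes:

    // Hypothetical check of the premapped-mode invariants.
    void CheckPremappedInit(uptr heap_start, uptr space_size, uptr region_info,
                            uptr region_info_size) {
      CHECK_NE(heap_start, 0);                    // premapped mode only
      CHECK_EQ(region_info % kCacheLineSize, 0);  // mirrors the DCHECK in Init()
      // The separately mapped metadata must not overlap the caller's heap.
      CHECK(region_info + region_info_size <= heap_start ||
            region_info >= heap_start + space_size);
    }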
@@ -596,6 +616,11 @@ class SizeClassAllocator64 {
   atomic_sint32_t release_to_os_interval_ms_;
+  uptr RegionInfoSpace;
+  // True if the user has already mapped the entire heap R/W.
+  bool PremappedHeap;
   struct Stats {
     uptr n_allocated;
     uptr n_freed;
@@ -625,7 +650,7 @@ class SizeClassAllocator64 {
   RegionInfo *GetRegionInfo(uptr class_id) const {
     DCHECK_LT(class_id, kNumClasses);
-    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
     return &regions[class_id];
   }
@@ -650,6 +675,9 @@ class SizeClassAllocator64 {
   }
   bool MapWithCallback(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap)
+      return beg >= NonConstSpaceBeg &&
+             beg + size <= NonConstSpaceBeg + kSpaceSize;
     uptr mapped = address_range.Map(beg, size, name);
     if (UNLIKELY(!mapped))
       return false;
@@ -659,11 +687,18 @@ class SizeClassAllocator64 {
   }
   void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap) {
+      CHECK_GE(beg, NonConstSpaceBeg);
+      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
+      return;
+    }
     CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
     MapUnmapCallback().OnMap(beg, size);
   }
   void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+    if (PremappedHeap)
+      return;
     MapUnmapCallback().OnUnmap(beg, size);
     address_range.Unmap(beg, size);
   }
@@ -832,6 +867,9 @@ class SizeClassAllocator64 {
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
+  //
+  // TODO(morehouse): Support a callback on memory release so HWASan can release
+  // aliases as well.
   void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);


@@ -196,9 +196,9 @@ TEST(SanitizerCommon, DenseSizeClassMap) {
 }
 template <class Allocator>
-void TestSizeClassAllocator() {
+void TestSizeClassAllocator(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -265,6 +265,25 @@ void TestSizeClassAllocator() {
 }
 #if SANITIZER_CAN_USE_ALLOCATOR64
+// Allocates kAllocatorSize aligned bytes on construction and frees it on
+// destruction.
+class ScopedPremappedHeap {
+ public:
+  ScopedPremappedHeap() {
+    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
+    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
+  }
+  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, kAllocatorSize); }
+  uptr Addr() { return AlignedAddr; }
+ private:
+  void *BasePtr;
+  uptr AlignedAddr;
+};
 // These tests can fail on Windows if memory is somewhat full and lit happens
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS
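
Why the helper above maps 2 * kAllocatorSize: rounding any base address up to a power-of-two alignment wastes at most alignment - 1 bytes, so doubling the mapping guarantees that a fully mapped, kAllocatorSize-aligned span of kAllocatorSize bytes exists inside it. A standalone sketch of that argument (hypothetical helper, not test code):

    void CheckAlignedSpanFits(uptr base, uptr alignment, uptr map_size) {
      uptr aligned = RoundUpTo(base, alignment);
      CHECK_LT(aligned - base, alignment);             // rounding waste < alignment
      // Holds whenever map_size >= 2 * alignment, as in ScopedPremappedHeap.
      CHECK_GE(base + map_size, aligned + alignment);
    }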
@@ -277,6 +296,13 @@ TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
 }
 #if !SANITIZER_ANDROID
+// Android only has 39-bit address space, so mapping 2 * kAllocatorSize
+// sometimes fails.
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
+}
 //FIXME(kostyak): find values so that those work on Android as well.
 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
   TestSizeClassAllocator<Allocator64Compact>();
@@ -320,9 +346,9 @@ TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
 }
 template <class Allocator>
-void SizeClassAllocatorMetadataStress() {
+void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -362,6 +388,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
 }
 #if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
+}
 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Compact>();
 }
@@ -374,9 +405,10 @@ TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
 }
 template <class Allocator>
-void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
+void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
+                                           uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -409,6 +441,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
       1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
 #if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
+}
 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
 }
@@ -624,10 +661,10 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
 }
 template <class PrimaryAllocator>
-void TestCombinedAllocator() {
+void TestCombinedAllocator(uptr premapped_heap = 0) {
   typedef CombinedAllocator<PrimaryAllocator> Allocator;
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   std::mt19937 r;
   typename Allocator::AllocatorCache cache;
@@ -699,6 +736,14 @@ TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
 }
 #if !SANITIZER_ANDROID
+#if !SANITIZER_WINDOWS
+// Windows fails to map 1TB, so disable this test.
+TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
+}
+#endif
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact>();
 }
@@ -714,12 +759,12 @@ TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
 }
 template <class Allocator>
-void TestSizeClassAllocatorLocalCache() {
+void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
   using AllocatorCache = typename Allocator::AllocatorCache;
   AllocatorCache cache;
   Allocator *a = new Allocator();
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -760,6 +805,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
 }
 #if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
+}
 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
 }
@@ -891,9 +941,9 @@ void IterationTestCallback(uptr chunk, void *arg) {
 }
 template <class Allocator>
-void TestSizeClassAllocatorIteration() {
+void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -942,6 +992,12 @@ TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
 }
+#if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
+}
+#endif
 #endif
 #endif