[asan/msan] new 32-bit allocator, basic functionality so far
llvm-svn: 169496
commit 2044135dca (parent 22dd8da6cd)
@@ -128,6 +128,7 @@ class SizeClassAllocator64 {
   }
 
   void *Allocate(uptr size, uptr alignment) {
+    if (size < alignment) size = alignment;
     CHECK(CanAllocate(size, alignment));
     return AllocateBySizeClass(SizeClassMap::ClassID(size));
   }
@@ -181,7 +182,7 @@ class SizeClassAllocator64 {
     uptr chunk_idx = GetChunkIdx((uptr)p, size);
     uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
     uptr begin = reg_beg + chunk_idx * size;
-    return (void*)begin;
+    return reinterpret_cast<void*>(begin);
   }
 
   static uptr GetActuallyAllocatedSize(void *p) {
@@ -220,7 +221,6 @@ class SizeClassAllocator64 {
  private:
   static const uptr kRegionSize = kSpaceSize / kNumClasses;
   COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
-  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
   // kRegionSize must be >= 2^32.
   COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
   // Populate the free list with at most this number of bytes at once
@@ -258,10 +258,10 @@ class SizeClassAllocator64 {
   }
 
   void PopulateFreeList(uptr class_id, RegionInfo *region) {
+    CHECK(region->free_list.empty());
     uptr size = SizeClassMap::Size(class_id);
     uptr beg_idx = region->allocated_user;
     uptr end_idx = beg_idx + kPopulateSize;
-    region->free_list.clear();
     uptr region_beg = kSpaceBeg + kRegionSize * class_id;
     uptr idx = beg_idx;
     uptr i = 0;
@@ -301,6 +301,161 @@ class SizeClassAllocator64 {
   }
 };
 
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// an u8 array possible_regions_[kNumPossibleRegions] to store the size classes.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing the objects of this class should be
+// chache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+          const uptr kMetadataSize, class SizeClassMap>
+class SizeClassAllocator32 {
+ public:
+  // Don't need to call Init if the object is a global (i.e. zero-initialized).
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+  }
+
+  bool CanAllocate(uptr size, uptr alignment) {
+    return size <= SizeClassMap::kMaxSize &&
+      alignment <= SizeClassMap::kMaxSize;
+  }
+
+  void *Allocate(uptr size, uptr alignment) {
+    if (size < alignment) size = alignment;
+    CHECK(CanAllocate(size, alignment));
+    return AllocateBySizeClass(SizeClassMap::ClassID(size));
+  }
+
+  void Deallocate(void *p) {
+    CHECK(PointerIsMine(p));
+    DeallocateBySizeClass(p, GetSizeClass(p));
+  }
+
+  void *GetMetaData(void *p) {
+    CHECK(PointerIsMine(p));
+    uptr mem = reinterpret_cast<uptr>(p);
+    uptr beg = ComputeRegionBeg(mem);
+    uptr size = SizeClassMap::Size(GetSizeClass(p));
+    u32 offset = mem - beg;
+    uptr n = offset / (u32)size;  // 32-bit division
+    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+    return (void*)meta;
+  }
+
+  bool PointerIsMine(void *p) {
+    return possible_regions_[ComputeRegionId(reinterpret_cast<uptr>(p))] != 0;
+  }
+
+  uptr GetSizeClass(void *p) {
+    return possible_regions_[ComputeRegionId(reinterpret_cast<uptr>(p))] - 1;
+  }
+
+  uptr GetActuallyAllocatedSize(void *p) {
+    CHECK(PointerIsMine(p));
+    return SizeClassMap::Size(GetSizeClass(p));
+  }
+
+  uptr TotalMemoryUsed() {
+    // No need to lock here.
+    uptr res = 0;
+    for (uptr i = 0; i < kNumPossibleRegions; i++)
+      if (possible_regions_[i])
+        res += kRegionSize;
+    return res;
+  }
+
+  void TestOnlyUnmap() {
+    for (uptr i = 0; i < kNumPossibleRegions; i++)
+      if (possible_regions_[i])
+        UnmapOrDie(reinterpret_cast<void*>(i * kRegionSize), kRegionSize);
+  }
+
+  typedef SizeClassMap SizeClassMapT;
+  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 128
+ private:
+  static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
+  static const uptr kRegionSize = 1 << kRegionSizeLog;
+  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+  COMPILER_CHECK(kNumClasses <= 128);
+
+  struct SizeClassInfo {
+    SpinMutex mutex;
+    AllocatorFreeList free_list;
+    char padding[kCacheLineSize - sizeof(uptr) - sizeof (AllocatorFreeList)];
+  };
+  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+  uptr ComputeRegionId(uptr mem) {
+    uptr res = mem >> kRegionSizeLog;
+    CHECK_LT(res, kNumPossibleRegions);
+    return res;
+  }
+
+  uptr ComputeRegionBeg(uptr mem) {
+    return mem & ~(kRegionSize - 1);
+  }
+
+  uptr AllocateRegion(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+                                      "SizeClassAllocator32"));
+    CHECK_EQ(0U, (res & (kRegionSize - 1)));
+    CHECK_EQ(0U, possible_regions_[ComputeRegionId(res)]);
+    possible_regions_[ComputeRegionId(res)] = class_id + 1;
+    return res;
+  }
+
+  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    return &size_class_info_array_[class_id];
+  }
+
+  void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
+    if (!sci->free_list.empty()) return;
+    uptr size = SizeClassMap::Size(class_id);
+    uptr reg = AllocateRegion(class_id);
+    uptr n_chunks = kRegionSize / (size + kMetadataSize);
+    for (uptr i = reg; i < reg + n_chunks * size; i += size)
+      sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
+  }
+
+  void *AllocateBySizeClass(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    SizeClassInfo *sci = GetSizeClassInfo(class_id);
+    SpinMutexLock l(&sci->mutex);
+    EnsureSizeClassHasAvailableChunks(sci, class_id);
+    CHECK(!sci->free_list.empty());
+    AllocatorListNode *node = sci->free_list.front();
+    sci->free_list.pop_front();
+    return reinterpret_cast<void*>(node);
+  }
+
+  void DeallocateBySizeClass(void *p, uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    SizeClassInfo *sci = GetSizeClassInfo(class_id);
+    SpinMutexLock l(&sci->mutex);
+    sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+  }
+
+  u8 possible_regions_[kNumPossibleRegions];
+  SizeClassInfo size_class_info_array_[kNumClasses];
+};
+
 // Objects of this type should be used as local caches for SizeClassAllocator64.
 // Since the typical use of this class is to have one object per thread in TLS,
 // is has to be POD.
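The header comment above describes the layout, and the arithmetic in ComputeRegionId, ComputeRegionBeg and GetMetaData is the heart of it: a pointer's region id is just the pointer shifted right by kRegionSizeLog, possible_regions_ stores class_id + 1 for every mapped region, and metadata slots are laid out backwards from the end of the region (UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1). The standalone sketch below replays that arithmetic with plain integers; it uses the constants of the 32-bit configuration but is only an illustration, not code from the patch.

// Illustration only (not part of the patch): replay the pointer-to-metadata
// arithmetic of SizeClassAllocator32 with the 32-bit constants.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static const uintptr_t kRegionSizeLog = 20;                // 1 MB regions (32-bit config)
static const uintptr_t kRegionSize = 1 << kRegionSizeLog;
static const uintptr_t kMetadataSize = 16;

// Mirrors ComputeRegionBeg plus the body of GetMetaData.
static uintptr_t MetaDataFor(uintptr_t p, uintptr_t chunk_size) {
  uintptr_t beg = p & ~(kRegionSize - 1);                  // region start
  uint32_t offset = (uint32_t)(p - beg);
  uintptr_t n = offset / (uint32_t)chunk_size;             // chunk index, 32-bit division
  return (beg + kRegionSize) - (n + 1) * kMetadataSize;    // MetaChunkN grows downwards
}

int main() {
  uintptr_t region_beg = 16u << 20;       // pretend MmapAlignedOrDie returned 16 MB
  uintptr_t chunk_size = 48;              // some size class
  uintptr_t chunk3 = region_beg + 3 * chunk_size;
  uintptr_t meta3 = MetaDataFor(chunk3, chunk_size);
  // The metadata of chunk #3 is the 4th 16-byte slot from the end of the region.
  assert(meta3 == region_beg + kRegionSize - 4 * kMetadataSize);
  printf("chunk #3 at 0x%lx -> metadata at 0x%lx\n",
         (unsigned long)chunk3, (unsigned long)meta3);
  return 0;
}

Because each region holds chunks of exactly one size class, the <gap> is whatever space is left after n_chunks = kRegionSize / (size + kMetadataSize) user chunks have been carved out at the bottom and n_chunks metadata slots at the top.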
@@ -23,14 +23,20 @@
 #if SANITIZER_WORDSIZE == 64
 static const uptr kAllocatorSpace = 0x700000000000ULL;
 static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
+static const u64 kAddressSpaceSize = 1ULL << 47;
 
 typedef SizeClassAllocator64<
   kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
 
 typedef SizeClassAllocator64<
   kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+#else
+static const u64 kAddressSpaceSize = 1ULL << 32;
 #endif
 
+typedef SizeClassAllocator32<
+  0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;
+
 template <class SizeClassMap>
 void TestSizeClassMap() {
   typedef SizeClassMap SCMap;
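Allocator32Compact instantiates the new allocator over the whole address range (kSpaceBeg = 0, kSpaceSize = kAddressSpaceSize) with 16 bytes of metadata per chunk and the compact size-class map. A minimal usage sketch follows; ExampleUse itself is hypothetical, but every call it makes exists in the patch and mirrors the updated test below.

// Hypothetical helper, for illustration only.
void ExampleUse() {
  Allocator32Compact *a = new Allocator32Compact;  // heap-allocated, as in the test
  a->Init();
  uptr size = 100;
  void *x = a->Allocate(size, 1);
  CHECK(a->PointerIsMine(x));
  CHECK_GE(a->GetActuallyAllocatedSize(x), size);
  uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));  // kMetadataSize == 16 bytes
  meta[0] = 42;
  a->Deallocate(x);
  a->TestOnlyUnmap();
  delete a;
}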
@@ -71,8 +77,8 @@ TEST(SanitizerCommon, CompactSizeClassMap) {
 
 template <class Allocator>
 void TestSizeClassAllocator() {
-  Allocator a;
-  a.Init();
+  Allocator *a = new Allocator;
+  a->Init();
 
   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
     50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
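The switch from a stack-allocated Allocator to new/delete is presumably motivated by the new allocator's footprint: all bookkeeping lives inside the object, and possible_regions_ alone has kSpaceSize / kRegionSize one-byte entries. A quick back-of-the-envelope check with the constants used above (an illustration, not code from the patch):

#include <stdio.h>

int main() {
  // Constants as configured in the test and the patch.
  unsigned long long space64 = 1ULL << 47, region64 = 1ULL << 24;  // 64-bit host
  unsigned long long space32 = 1ULL << 32, region32 = 1ULL << 20;  // 32-bit host
  // possible_regions_ is a u8 array with one entry per possible region.
  printf("64-bit host: %llu entries (~%llu MB)\n",
         space64 / region64, (space64 / region64) >> 20);
  printf("32-bit host: %llu entries (%llu KB)\n",
         space32 / region32, (space32 / region32) >> 10);
  return 0;
}

Roughly 8 MB of possible_regions_ on a 64-bit host is too large for a test's stack frame, hence the heap allocation here and the matching delete a added at the end of the function.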
@@ -82,19 +88,19 @@ void TestSizeClassAllocator() {
   uptr last_total_allocated = 0;
   for (int i = 0; i < 5; i++) {
     // Allocate a bunch of chunks.
-    for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
+    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
       uptr size = sizes[s];
-      if (!a.CanAllocate(size, 1)) continue;
+      if (!a->CanAllocate(size, 1)) continue;
       // printf("s = %ld\n", size);
       uptr n_iter = std::max((uptr)2, 1000000 / size);
       for (uptr i = 0; i < n_iter; i++) {
-        void *x = a.Allocate(size, 1);
+        void *x = a->Allocate(size, 1);
         allocated.push_back(x);
-        CHECK(a.PointerIsMine(x));
-        CHECK_GE(a.GetActuallyAllocatedSize(x), size);
-        uptr class_id = a.GetSizeClass(x);
+        CHECK(a->PointerIsMine(x));
+        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
+        uptr class_id = a->GetSizeClass(x);
         CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
-        uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
         metadata[0] = reinterpret_cast<uptr>(x) + 1;
         metadata[1] = 0xABCD;
       }
@@ -102,19 +108,20 @@ void TestSizeClassAllocator() {
     // Deallocate all.
     for (uptr i = 0; i < allocated.size(); i++) {
       void *x = allocated[i];
-      uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
       CHECK_EQ(metadata[1], 0xABCD);
-      a.Deallocate(x);
+      a->Deallocate(x);
     }
     allocated.clear();
-    uptr total_allocated = a.TotalMemoryUsed();
+    uptr total_allocated = a->TotalMemoryUsed();
     if (last_total_allocated == 0)
       last_total_allocated = total_allocated;
     CHECK_EQ(last_total_allocated, total_allocated);
   }
 
-  a.TestOnlyUnmap();
+  a->TestOnlyUnmap();
+  delete a;
 }
 
 #if SANITIZER_WORDSIZE == 64
@@ -127,6 +134,10 @@ TEST(SanitizerCommon, SizeClassAllocator64Compact) {
 }
 #endif
 
+TEST(SanitizerCommon, SizeClassAllocator32Compact) {
+  TestSizeClassAllocator<Allocator32Compact>();
+}
+
 template <class Allocator>
 void SizeClassAllocator64MetadataStress() {
   Allocator a;
@@ -181,7 +192,6 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
-  fprintf(stderr, "xxxx %ld\n", 0L);
   LargeMmapAllocator a;
   a.Init();
 
@@ -190,7 +200,6 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
   static const uptr size = 1000;
   // Allocate some.
   for (int i = 0; i < kNumAllocs; i++) {
-    fprintf(stderr, "zzz0 %ld\n", size);
     allocated[i] = a.Allocate(size, 1);
   }
   // Deallocate all.
@@ -205,7 +214,6 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
 
   // Allocate some more, also add metadata.
   for (int i = 0; i < kNumAllocs; i++) {
-    fprintf(stderr, "zzz1 %ld\n", size);
     void *x = a.Allocate(size, 1);
     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
@@ -227,7 +235,6 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
     for (int i = 0; i < kNumAllocs; i++) {
       uptr size = ((i % 10) + 1) * 4096;
-      fprintf(stderr, "zzz1 %ld %ld\n", size, alignment);
       allocated[i] = a.Allocate(size, alignment);
       CHECK_EQ(0, (uptr)allocated[i] % alignment);
       char *p = (char*)allocated[i];