[sanitizer] make LargeMmapAllocator::GetBlockBegin faster by not using a linked list

llvm-svn: 171035
Author: Kostya Serebryany
Date:   2012-12-24 14:35:14 +00:00
Commit: fc7de2910c
Parent: 19969e5045

2 changed files with 48 additions and 28 deletions

--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h

@@ -714,11 +714,9 @@ class LargeMmapAllocator {
     h->map_size = map_size;
     {
       SpinMutexLock l(&mutex_);
-      h->next = list_;
-      h->prev = 0;
-      if (list_)
-        list_->prev = h;
-      list_ = h;
+      uptr idx = n_chunks_++;
+      h->chunk_idx = idx;
+      chunks_[idx] = h;
     }
     return reinterpret_cast<void*>(res);
   }
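The allocation path now registers a new chunk in O(1): append to a dense array and record the slot index in the header, instead of splicing into a doubly-linked list. A minimal standalone sketch of the idiom, with hypothetical names (Chunk, registry, n) standing in for Header, chunks_, and n_chunks_:

    typedef unsigned long uptr;         // stand-in for the sanitizer's uptr

    struct Chunk { uptr idx; };         // stand-in for Header::chunk_idx
    const uptr kCapacity = 1 << 18;     // plays the role of kMaxNumChunks
    Chunk *registry[kCapacity];         // plays the role of chunks_
    uptr n = 0;                         // plays the role of n_chunks_

    void Register(Chunk *c) {           // caller holds the allocator lock
      c->idx = n;
      registry[n++] = c;                // note: the hunk above shows no capacity check
    }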
@@ -727,14 +725,12 @@ class LargeMmapAllocator {
     Header *h = GetHeader(p);
     {
       SpinMutexLock l(&mutex_);
-      Header *prev = h->prev;
-      Header *next = h->next;
-      if (prev)
-        prev->next = next;
-      if (next)
-        next->prev = prev;
-      if (h == list_)
-        list_ = next;
+      uptr idx = h->chunk_idx;
+      CHECK_EQ(chunks_[idx], h);
+      CHECK_LT(idx, n_chunks_);
+      chunks_[idx] = chunks_[n_chunks_ - 1];
+      chunks_[idx]->chunk_idx = idx;
+      n_chunks_--;
     }
     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
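Deallocation stays O(1) via the classic swap-with-last trick for unordered arrays: the freed chunk's slot is backfilled with the array's last element, and that element's stored index is repaired so later removals still find their slot. A sketch, continuing the hypothetical names from the previous snippet:

    void Unregister(Chunk *c) {         // caller holds the allocator lock
      uptr idx = c->idx;
      registry[idx] = registry[n - 1];  // backfill the hole with the last chunk
      registry[idx]->idx = idx;         // repair the moved chunk's index
      n--;                              // array order is not preserved
    }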
@@ -743,8 +739,10 @@ class LargeMmapAllocator {
   uptr TotalMemoryUsed() {
     SpinMutexLock l(&mutex_);
     uptr res = 0;
-    for (Header *l = list_; l; l = l->next) {
-      res += RoundUpMapSize(l->size);
+    for (uptr i = 0; i < n_chunks_; i++) {
+      Header *h = chunks_[i];
+      CHECK_EQ(h->chunk_idx, i);
+      res += RoundUpMapSize(h->size);
     }
     return res;
   }
@@ -765,20 +763,32 @@ class LargeMmapAllocator {
   void *GetBlockBegin(void *ptr) {
     uptr p = reinterpret_cast<uptr>(ptr);
     SpinMutexLock l(&mutex_);
-    for (Header *l = list_; l; l = l->next) {
-      if (p >= l->map_beg && p < l->map_beg + l->map_size)
-        return GetUser(l);
+    uptr nearest_chunk = 0;
+    // Cache-friendly linear search.
+    for (uptr i = 0; i < n_chunks_; i++) {
+      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      if (p < ch) continue;  // p is to the left of this chunk; skip it.
+      if (p - ch < p - nearest_chunk)
+        nearest_chunk = ch;
     }
-    return 0;
+    if (!nearest_chunk)
+      return 0;
+    Header *h = reinterpret_cast<Header *>(nearest_chunk);
+    CHECK_GE(nearest_chunk, h->map_beg);
+    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+    CHECK_LE(nearest_chunk, p);
+    if (h->map_beg + h->map_size < p)
+      return 0;
+    return GetUser(h);
   }

  private:
+  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
   struct Header {
     uptr map_beg;
     uptr map_size;
     uptr size;
-    Header *next;
-    Header *prev;
+    uptr chunk_idx;
   };

   Header *GetHeader(uptr p) {
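GetBlockBegin is the point of the commit: a linear scan over a flat array is cache-friendly, while walking a linked list chases pointers scattered across the address space. The comparison `p - ch < p - nearest_chunk` leans on unsigned arithmetic: nearest_chunk starts at 0, so the initial distance `p - 0` is at least as large as the distance to any header at or below p, and headers above p are skipped before the subtraction could wrap. A standalone sketch of the search, with hypothetical names:

    typedef unsigned long uptr;  // stand-in for the sanitizer's uptr

    // Returns the largest element of headers[0..n) that is <= p, or 0.
    uptr NearestBelow(uptr p, const uptr *headers, uptr n) {
      uptr nearest = 0;
      for (uptr i = 0; i < n; i++) {
        uptr ch = headers[i];
        if (p < ch) continue;      // header lies above p: not a candidate
        if (p - ch < p - nearest)  // unsigned distances; nearest == 0 yields p
          nearest = ch;
      }
      return nearest;
    }

The caller must still validate the winner, as the new code does with the map_beg/map_size range checks: the nearest header at or below p does not by itself prove that p falls inside that chunk's mapping.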
@@ -797,7 +807,8 @@ class LargeMmapAllocator {
   }

   uptr page_size_;
-  Header *list_;
+  Header *chunks_[kMaxNumChunks];
+  uptr n_chunks_;
   SpinMutex mutex_;
 };
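The price of dropping the list is a hard cap and a statically sized table: chunks_ holds kMaxNumChunks pointers unconditionally. A quick back-of-the-envelope footprint check (assuming 4-byte pointers on 32-bit targets and 8-byte pointers on 64-bit ones):

    #include <cstdio>

    int main() {
      // kMaxNumChunks is 1 << 15 on 32-bit targets, 1 << 18 on 64-bit.
      const long slots32 = 1L << 15, slots64 = 1L << 18;
      std::printf("32-bit: %ld KiB\n", slots32 * 4 / 1024);  // 128 KiB
      std::printf("64-bit: %ld KiB\n", slots64 * 8 / 1024);  // 2048 KiB (2 MiB)
      return 0;
    }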

--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc

@@ -244,12 +244,13 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
   LargeMmapAllocator<> a;
   a.Init();
-  static const int kNumAllocs = 100;
+  static const int kNumAllocs = 1000;
   char *allocated[kNumAllocs];
-  static const uptr size = 1000;
+  static const uptr size = 4000;
   // Allocate some.
   for (int i = 0; i < kNumAllocs; i++) {
     allocated[i] = (char *)a.Allocate(size, 1);
+    CHECK(a.PointerIsMine(allocated[i]));
   }
   // Deallocate all.
   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
@@ -269,6 +270,11 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
     *meta = i;
     allocated[i] = x;
   }
+  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
+    char *p = allocated[i % kNumAllocs];
+    CHECK(a.PointerIsMine(p));
+    CHECK(a.PointerIsMine(p + 2000));
+  }
   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
   // Deallocate all in reverse order.
   for (int i = 0; i < kNumAllocs; i++) {
@@ -280,9 +286,12 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
     a.Deallocate(p);
   }
   CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+  // Test alignments.
   uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
-    for (int i = 0; i < kNumAllocs; i++) {
+    const uptr kNumAlignedAllocs = 100;
+    for (int i = 0; i < kNumAlignedAllocs; i++) {
       uptr size = ((i % 10) + 1) * 4096;
       char *p = allocated[i] = (char *)a.Allocate(size, alignment);
       CHECK_EQ(p, a.GetBlockBegin(p));
@@ -291,7 +300,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
       CHECK_EQ(0, (uptr)allocated[i] % alignment);
       p[0] = p[size - 1] = 0;
     }
-    for (int i = 0; i < kNumAllocs; i++) {
+    for (int i = 0; i < kNumAlignedAllocs; i++) {
       a.Deallocate(allocated[i]);
     }
   }
@@ -381,7 +390,7 @@ void TestSizeClassAllocatorLocalCache() {
   const int kNumIter = 100;
   uptr saved_total = 0;
   for (int class_id = 1; class_id <= 5; class_id++) {
-    for (int i = 0; i < kNumIter; i++) {
+    for (int it = 0; it < kNumIter; it++) {
       void *allocated[kNumAllocs];
       for (uptr i = 0; i < kNumAllocs; i++) {
         allocated[i] = cache.Allocate(a, class_id);
@@ -391,7 +400,7 @@ void TestSizeClassAllocatorLocalCache() {
       }
       cache.Drain(a);
       uptr total_allocated = a->TotalMemoryUsed();
-      if (i)
+      if (it)
         CHECK_EQ(saved_total, total_allocated);
       saved_total = total_allocated;
     }
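The last two hunks are a readability fix that rides along with the test changes: the outer iteration counter i was shadowed by the inner allocation loop's own i, so the `if (i)` at the bottom was easy to misread even though it legally referred to the outer variable once the inner scope closed. A minimal illustration of the shadowing pattern, with hypothetical names:

    int Sum() {
      int total = 0;
      for (int i = 0; i < 3; i++) {      // outer counter
        for (int i = 0; i < 10; i++) {   // shadows the outer i in this loop
          total += i;                    // refers to the inner i
        }
        // here i is the outer counter again; renaming one loop avoids the trap
      }
      return total;
    }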