[sanitizers] Fix check failure on dealloc from new thread

Summary:
If the first allocator call on a new thread is a deallocation, the
thread-local cache is still zero-initialized and max_count is zero,
leading to check failure. Initialize the cache from Deallocate() as
well, and add a test for this case, which was reduced from a chromium
build of WebKit's DumpRenderTree.

Reviewers: eugenis

CC: glider

Differential Revision: http://llvm-reviews.chandlerc.com/D495

llvm-svn: 176552
Author: Reid Kleckner
Date:   2013-03-06 14:54:08 +00:00
Parent: 105963d17c
Commit: 66c26e5e96

2 changed files with 49 additions and 2 deletions
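Why the check fired, in brief: the thread-local cache is plain-old-data and starts out zero-initialized, so on a thread that has never allocated, count == max_count == 0 holds on entry to Deallocate(). The "cache full" branch is then taken immediately and an empty batch is drained into the shared allocator, which the new CHECK_GT(b->count, 0) assertions reject. Below is a minimal standalone sketch of that sequence; the names and types (PerClass, DeallocateBatch, the limit of 64) are simplified stand-ins, not the real sanitizer classes:

    // Sketch of the bug: a zero-initialized cache takes the "full" branch
    // on its very first Deallocate() and drains an empty batch.
    #include <cassert>
    #include <cstdio>

    struct PerClass {
      unsigned count;      // elements currently cached
      unsigned max_count;  // 0 until the cache is initialized
      void *batch[64];
    };

    // Stand-in for the shared allocator's DeallocateBatch: empty batches
    // are invalid, mirroring CHECK_GT(b->count, 0).
    static void DeallocateBatch(PerClass *c) {
      assert(c->count > 0 && "empty batch: the CHECK would fire here");
    }

    static void Deallocate(PerClass *c, void *p, bool init_first) {
      if (init_first && c->max_count == 0)
        c->max_count = 64;           // what InitCache() establishes
      if (c->count == c->max_count)  // true immediately when both are 0
        DeallocateBatch(c);          // old code reached this with nothing cached
      c->batch[c->count++] = p;
    }

    int main() {
      int dummy;
      PerClass cache = {};  // POD, zero-initialized, like a brand-new thread
      // Fixed path: initialize first, as Deallocate() now does via InitCache().
      Deallocate(&cache, &dummy, /*init_first=*/true);
      std::printf("ok, cached %u element(s)\n", cache.count);
      // Passing init_first=false would trip the assert, mirroring the old bug.
      return 0;
    }

Allocate() already self-initialized the cache this way (the new test memsets a cache to zero and allocates from it successfully); the fix simply gives Deallocate() the same behavior.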

compiler-rt/lib/sanitizer_common/sanitizer_allocator.h

@@ -342,6 +342,7 @@ class SizeClassAllocator64 {
   NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
     RegionInfo *region = GetRegionInfo(class_id);
+    CHECK_GT(b->count, 0);
     region->free_list.Push(b);
     region->n_freed += b->count;
   }
@@ -535,6 +536,7 @@ class SizeClassAllocator64 {
       beg_idx += count * size;
       if (beg_idx + count * size + size > region->mapped_user)
         break;
+      CHECK_GT(b->count, 0);
       region->free_list.Push(b);
     }
     return b;
@@ -620,6 +622,7 @@ class SizeClassAllocator32 {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
+    CHECK_GT(b->count, 0);
     sci->free_list.push_front(b);
   }
@@ -741,13 +744,16 @@ class SizeClassAllocator32 {
       }
       b->batch[b->count++] = (void*)i;
       if (b->count == max_count) {
+        CHECK_GT(b->count, 0);
         sci->free_list.push_back(b);
         b = 0;
       }
     }
-    if (b)
+    if (b) {
+      CHECK_GT(b->count, 0);
       sci->free_list.push_back(b);
+    }
   }

   struct State {
     u8 possible_regions[kNumPossibleRegions];
@@ -791,8 +797,12 @@ struct SizeClassAllocatorLocalCache {
   void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
+    // If the first allocator call on a new thread is a deallocation, then
+    // max_count will be zero, leading to check failure.
+    InitCache();
     stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
+    CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
       Drain(allocator, class_id);
     c->batch[c->count++] = p;
@@ -818,7 +828,7 @@ struct SizeClassAllocatorLocalCache {
   AllocatorStats stats_;

   void InitCache() {
-    if (per_class_[0].max_count)
+    if (per_class_[1].max_count)
       return;
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
@@ -853,6 +863,7 @@ struct SizeClassAllocatorLocalCache {
     }
     b->count = cnt;
     c->count -= cnt;
+    CHECK_GT(b->count, 0);
     allocator->DeallocateBatch(&stats_, class_id, b);
   }
 };
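One subtlety in the InitCache() hunk: Deallocate() now calls InitCache() on every deallocation, so the early-return sentinel must be a per-class entry whose max_count is genuinely nonzero once initialization has run. Class 0 is the reserved, never-used class (note the CHECK_NE(class_id, 0UL) guard above), and its max_count apparently stays 0 even after initialization, so testing per_class_[0] could never short-circuit; per_class_[1], the first real size class, can. A minimal sketch of this idempotent lazy-init pattern, with hypothetical names:

    // Idempotent lazy init keyed off the first *usable* slot.  Slot 0 is
    // reserved and keeps limit == 0 forever, so it cannot be the sentinel.
    #include <cstdio>

    const unsigned kNumSlots = 4;

    struct Slot { unsigned limit; };
    static Slot slots[kNumSlots];  // zero-initialized, like a fresh cache
    static unsigned init_runs = 0;

    static void InitCache() {
      if (slots[1].limit)  // sentinel: first real slot, not slots[0]
        return;
      init_runs++;
      for (unsigned i = 1; i < kNumSlots; i++)
        slots[i].limit = 64;  // slot 0 intentionally left at 0
    }

    int main() {
      InitCache();
      InitCache();  // short-circuits; the init body runs only once
      std::printf("init ran %u time(s)\n", init_runs);  // prints 1
      return 0;
    }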

compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc

@@ -482,6 +482,42 @@ TEST(SanitizerCommon, AllocatorLeakTest) {
   a.TestOnlyUnmap();
 }

+// Struct which is allocated to pass info to new threads.  The new thread frees
+// it.
+struct NewThreadParams {
+  AllocatorCache *thread_cache;
+  AllocatorCache::Allocator *allocator;
+  uptr class_id;
+};
+
+// Called in a new thread.  Just frees its argument.
+static void *DeallocNewThreadWorker(void *arg) {
+  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
+  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
+  return NULL;
+}
+
+// The allocator cache is supposed to be POD and zero initialized.  We should be
+// able to call Deallocate on a zeroed cache, and it will self-initialize.
+TEST(Allocator, AllocatorCacheDeallocNewThread) {
+  AllocatorCache::Allocator allocator;
+  allocator.Init();
+  AllocatorCache main_cache;
+  AllocatorCache child_cache;
+  memset(&main_cache, 0, sizeof(main_cache));
+  memset(&child_cache, 0, sizeof(child_cache));
+
+  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
+  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
+      main_cache.Allocate(&allocator, class_id));
+  params->thread_cache = &child_cache;
+  params->allocator = &allocator;
+  params->class_id = class_id;
+  pthread_t t;
+  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
+  EXPECT_EQ(0, pthread_join(t, 0));
+}
+
 #endif

 TEST(Allocator, Basic) {