Change StackDepot interface to use StackTrace more extensively

llvm-svn: 220637
Alexey Samsonov 2014-10-26 06:23:07 +00:00
parent 6e7af8156f
commit 3741ab82ba
15 changed files with 93 additions and 104 deletions
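A note on the shape of the change before the per-file hunks: StackDepotPut and StackDepotPut_WithHandle now take a StackTrace by value instead of a raw (const uptr *, uptr) pair, and StackDepotReverseMap::Get returns a StackTrace instead of filling an out-parameter. A minimal sketch of a call site in the new style (RecordStack is an illustrative helper, not part of the patch; it assumes compiler-rt's lib/ directory is on the include path):

// Old interface (pre-patch):  u32 id = StackDepotPut(stack->trace, stack->size);
// New interface (this patch): u32 id = StackDepotPut(*stack);
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

namespace __sanitizer {

// Illustrative helper: intern an unwound stack. BufferedStackTrace derives
// from StackTrace, so passing it where a StackTrace is taken by value
// slices down to the (trace, size) view the depot actually stores.
static u32 RecordStack(const BufferedStackTrace &unwound) {
  return StackDepotPut(unwound);  // returns 0 for an empty/invalid stack
}

}  // namespace __sanitizer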

View File

@@ -354,7 +354,7 @@ static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
meta[1] = chunk_beg;
}
m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
m->alloc_context_id = StackDepotPut(*stack);
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
@@ -423,7 +423,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(stack->trace, stack->size);
m->free_context_id = StackDepotPut(*stack);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
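For context, the pattern these two allocator hunks implement: the unwound malloc/free stack is compressed into a 32-bit depot id stashed in the chunk header, and a report later expands it with StackDepotGet. A rough sketch with hypothetical names (ChunkRecord, RecordAlloc, PrintAllocStack are illustrative stand-ins, not ASan's real types):

#include "sanitizer_common/sanitizer_stackdepot.h"   // StackDepotPut / StackDepotGet
#include "sanitizer_common/sanitizer_stacktrace.h"   // StackTrace / BufferedStackTrace

namespace __sanitizer {

struct ChunkRecord {          // hypothetical stand-in for AsanChunk
  u32 alloc_context_id;
  u32 free_context_id;
};

static void RecordAlloc(ChunkRecord *m, BufferedStackTrace *stack) {
  // New interface: pass the StackTrace base object, not (trace, size).
  m->alloc_context_id = StackDepotPut(*stack);
}

static void PrintAllocStack(const ChunkRecord *m) {
  // The 32-bit id is enough to get the frames back out of the depot.
  StackTrace alloc_stack = StackDepotGet(m->alloc_context_id);
  if (alloc_stack.size > 0)
    alloc_stack.Print();
}

}  // namespace __sanitizer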

View File

@@ -217,7 +217,7 @@ using namespace __asan; // NOLINT
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
GET_STACK_TRACE_FATAL_HERE;
u32 stack_id = StackDepotPut(stack.trace, stack.size);
u32 stack_id = StackDepotPut(stack);
BlockingMutexLock lock(&mu_for_globals);
if (!global_registration_site_vector)
global_registration_site_vector =

View File

@@ -30,7 +30,7 @@ namespace __asan {
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
stack_id = StackDepotPut(args->stack->trace, args->stack->size);
stack_id = StackDepotPut(*args->stack);
thread = args->thread;
thread->set_context(this);
}

View File

@@ -63,7 +63,7 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
ChunkMetadata *m = Metadata(p);
CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

View File

@@ -371,8 +371,8 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
u32 stack_trace_id = 0;
if (resolution > 0) {
StackTrace stack = StackDepotGet(m.stack_trace_id());
uptr size = Min(stack.size, resolution);
stack_trace_id = StackDepotPut(stack.trace, size);
stack.size = Min(stack.size, resolution);
stack_trace_id = StackDepotPut(stack);
} else {
stack_trace_id = m.stack_trace_id();
}

View File

@@ -94,11 +94,10 @@ void ProcessGlobalRegions(Frontier *frontier) {
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id);
uptr size = 0;
const uptr *trace = map->Get(stack_id, &size);
StackTrace stack = map->Get(stack_id);
// The top frame is our malloc/calloc/etc. The next frame is the caller.
if (size >= 2)
return trace[1];
if (stack.size >= 2)
return stack.trace[1];
return 0;
}
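In the reverse-map hunk, Get now returns a StackTrace value instead of filling a size out-parameter. A sketch of the caller-PC lookup in the new style, essentially what GetCallerPC does above (CallerOfMalloc is an illustrative name; it assumes a populated StackDepotReverseMap):

#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __sanitizer {

static uptr CallerOfMalloc(u32 stack_id, StackDepotReverseMap *map) {
  StackTrace stack = map->Get(stack_id);
  // Frame 0 is the allocator entry point (malloc/calloc/...);
  // frame 1 is the user code that called it.
  return stack.size >= 2 ? stack.trace[1] : 0;
}

}  // namespace __sanitizer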

View File

@@ -280,7 +280,7 @@ u32 ChainOrigin(u32 id, StackTrace *stack) {
}
}
StackDepotHandle h = StackDepotPut_WithHandle(stack->trace, stack->size);
StackDepotHandle h = StackDepotPut_WithHandle(*stack);
if (!h.valid()) return id;
if (flags()->origin_history_per_stack_limit > 0) {
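MSan's origin chaining uses the handle-returning variant so it can consult the per-stack use counter exposed by StackDepotHandle. A simplified, hedged sketch of that flavor; the real ChainOrigin only bumps the counter under its origin_history_per_stack_limit policy, which is omitted here, and InternWithUseCount is an illustrative name:

#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __sanitizer {

static u32 InternWithUseCount(StackTrace stack) {
  StackDepotHandle h = StackDepotPut_WithHandle(stack);
  if (!h.valid())
    return 0;                    // empty/invalid stack was not stored
  // The handle exposes a per-stack use counter that callers may bump,
  // e.g. to cap how much origin history a single stack can accumulate.
  h.inc_use_count_unsafe();
  return h.id();
}

}  // namespace __sanitizer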

View File

@@ -102,7 +102,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
} else if (flags()->poison_in_malloc) {
__msan_poison(allocated, size);
if (__msan_get_track_origins()) {
u32 stack_id = StackDepotPut(stack->trace, stack->size);
u32 stack_id = StackDepotPut(*stack);
CHECK(stack_id);
u32 id;
ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
@@ -125,7 +125,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
if (flags()->poison_in_free) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
u32 stack_id = StackDepotPut(stack->trace, stack->size);
u32 stack_id = StackDepotPut(*stack);
CHECK(stack_id);
u32 id;
ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);

View File

@@ -842,7 +842,7 @@ void __msan_allocated_memory(const void* data, uptr size) {
if (flags()->poison_in_malloc)
__msan_poison(data, size);
if (__msan_get_track_origins()) {
u32 stack_id = StackDepotPut(stack.trace, stack.size);
u32 stack_id = StackDepotPut(stack);
u32 id;
ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
__msan_set_origin(data, size, Origin(id, 1).raw_id());

View File

@@ -18,32 +18,6 @@
namespace __sanitizer {
// FIXME: Get rid of this class in favor of StackTrace.
struct StackDepotDesc {
const uptr *stack;
uptr size;
u32 hash() const {
// murmur2
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
u32 h = seed ^ (size * sizeof(uptr));
for (uptr i = 0; i < size; i++) {
u32 k = stack[i];
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
bool is_valid() { return size > 0 && stack; }
};
struct StackDepotNode {
StackDepotNode *link;
u32 id;
@@ -59,14 +33,14 @@ struct StackDepotNode {
static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
static const u32 kHashMask = ~kUseCountMask;
typedef StackDepotDesc args_type;
typedef StackTrace args_type;
bool eq(u32 hash, const args_type &args) const {
u32 hash_bits =
atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
if ((hash & kHashMask) != hash_bits || args.size != size) return false;
uptr i = 0;
for (; i < size; i++) {
if (stack[i] != args.stack[i]) return false;
if (stack[i] != args.trace[i]) return false;
}
return true;
}
@@ -76,11 +50,10 @@ struct StackDepotNode {
void store(const args_type &args, u32 hash) {
atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
size = args.size;
internal_memcpy(stack, args.stack, size * sizeof(uptr));
internal_memcpy(stack, args.trace, size * sizeof(uptr));
}
args_type load() const {
args_type ret = {&stack[0], size};
return ret;
return args_type(&stack[0], size);
}
StackDepotHandle get_handle() { return StackDepotHandle(this); }
@@ -100,8 +73,6 @@ void StackDepotHandle::inc_use_count_unsafe() {
StackDepotNode::kUseCountMask;
CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
uptr StackDepotHandle::size() { return node_->size; }
uptr *StackDepotHandle::stack() { return &node_->stack[0]; }
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
@@ -112,20 +83,17 @@ StackDepotStats *StackDepotGetStats() {
return theDepot.GetStats();
}
u32 StackDepotPut(const uptr *stack, uptr size) {
StackDepotDesc desc = {stack, size};
StackDepotHandle h = theDepot.Put(desc);
u32 StackDepotPut(StackTrace stack) {
StackDepotHandle h = theDepot.Put(stack);
return h.valid() ? h.id() : 0;
}
StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
StackDepotDesc desc = {stack, size};
return theDepot.Put(desc);
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
return theDepot.Put(stack);
}
StackTrace StackDepotGet(u32 id) {
StackDepotDesc desc = theDepot.Get(id);
return StackTrace(desc.stack, desc.size);
return theDepot.Get(id);
}
void StackDepotLockAll() {
@@ -156,18 +124,15 @@ StackDepotReverseMap::StackDepotReverseMap()
InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}
const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
if (!map_.size()) return 0;
StackTrace StackDepotReverseMap::Get(u32 id) {
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, 0};
uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
IdDescPair::IdComparator);
if (idx > map_.size()) {
*size = 0;
return 0;
}
StackDepotNode *desc = map_[idx].desc;
*size = desc->size;
return desc->stack;
if (idx > map_.size())
return StackTrace();
return map_[idx].desc->load();
}
} // namespace __sanitizer
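One consequence of the rewritten sanitizer_stackdepot.cc worth keeping in mind: StackDepotPut returns 0 when the depot rejects the stack (h.valid() is false for an empty trace), so callers can treat id 0 as "no stack recorded", and failed lookups come back as a default-constructed StackTrace. A small hedged usage sketch (StoreOrSkip/HasStack are illustrative):

#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __sanitizer {

static void StoreOrSkip(StackTrace stack, u32 *slot) {
  // id == 0 means the depot stored nothing, e.g. stack.size == 0 or a
  // null trace pointer.
  *slot = StackDepotPut(stack);
}

static bool HasStack(u32 id) {
  return id != 0 && StackDepotGet(id).size > 0;
}

}  // namespace __sanitizer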

View File

@@ -29,16 +29,13 @@ struct StackDepotHandle {
u32 id();
int use_count();
void inc_use_count_unsafe();
uptr size();
uptr *stack();
};
const int kStackDepotMaxUseCount = 1U << 20;
StackDepotStats *StackDepotGetStats();
// FIXME: Pass StackTrace as an input argument here.
u32 StackDepotPut(const uptr *stack, uptr size);
StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size);
u32 StackDepotPut(StackTrace stack);
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
// Retrieves a stored stack trace by the id.
StackTrace StackDepotGet(u32 id);
@@ -52,7 +49,7 @@ void StackDepotUnlockAll();
class StackDepotReverseMap {
public:
StackDepotReverseMap();
const uptr *Get(u32 id, uptr *size);
StackTrace Get(u32 id);
private:
struct IdDescPair {

View File

@@ -33,11 +33,33 @@ struct StackTrace {
const uptr *trace;
uptr size;
StackTrace() : trace(nullptr), size(0) {}
StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {}
// Prints a symbolized stacktrace, followed by an empty line.
void Print() const;
u32 hash() const {
// murmur2
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
u32 h = seed ^ (size * sizeof(uptr));
for (uptr i = 0; i < size; i++) {
u32 k = trace[i];
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
bool is_valid() const { return size > 0 && trace; }
static bool WillUseFastUnwind(bool request_fast_unwind) {
// Check if fast unwind is available. Fast unwind is the only option on Mac.
// It is also the only option on FreeBSD as the slow unwinding that
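The murmur2-style hash shown in this hunk previously lived on the internal StackDepotDesc helper; moving it onto StackTrace lets anything holding a StackTrace compute the depot's bucketing hash. A standalone, dependency-free sketch of the same computation, using plain C++ types in place of the sanitizer's uptr/u32 (HashFrames is an illustrative name):

#include <cstddef>
#include <cstdint>

// Same mixing as StackTrace::hash() above, over an array of
// pointer-sized frame addresses.
uint32_t HashFrames(const uintptr_t *trace, size_t size) {
  const uint32_t m = 0x5bd1e995;
  const uint32_t seed = 0x9747b28c;
  const uint32_t r = 24;
  uint32_t h = seed ^ (size * sizeof(uintptr_t));
  for (size_t i = 0; i < size; i++) {
    uint32_t k = static_cast<uint32_t>(trace[i]);
    k *= m;
    k ^= k >> r;
    k *= m;
    h *= m;
    h ^= k;
  }
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}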

View File

@@ -18,12 +18,13 @@
namespace __sanitizer {
TEST(SanitizerCommon, StackDepotBasic) {
uptr s1[] = {1, 2, 3, 4, 5};
u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
uptr array[] = {1, 2, 3, 4, 5};
StackTrace s1(array, ARRAY_SIZE(array));
u32 i1 = StackDepotPut(s1);
StackTrace stack = StackDepotGet(i1);
EXPECT_NE(stack.trace, (uptr*)0);
EXPECT_EQ(ARRAY_SIZE(s1), stack.size);
EXPECT_EQ(0, internal_memcmp(stack.trace, s1, sizeof(s1)));
EXPECT_EQ(ARRAY_SIZE(array), stack.size);
EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));
}
TEST(SanitizerCommon, StackDepotAbsent) {
@@ -32,7 +33,7 @@ TEST(SanitizerCommon, StackDepotAbsent) {
}
TEST(SanitizerCommon, StackDepotEmptyStack) {
u32 i1 = StackDepotPut(0, 0);
u32 i1 = StackDepotPut(StackTrace());
StackTrace stack = StackDepotGet(i1);
EXPECT_EQ((uptr*)0, stack.trace);
}
@@ -43,44 +44,49 @@ TEST(SanitizerCommon, StackDepotZeroId) {
}
TEST(SanitizerCommon, StackDepotSame) {
uptr s1[] = {1, 2, 3, 4, 6};
u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
u32 i2 = StackDepotPut(s1, ARRAY_SIZE(s1));
uptr array[] = {1, 2, 3, 4, 6};
StackTrace s1(array, ARRAY_SIZE(array));
u32 i1 = StackDepotPut(s1);
u32 i2 = StackDepotPut(s1);
EXPECT_EQ(i1, i2);
StackTrace stack = StackDepotGet(i1);
EXPECT_NE(stack.trace, (uptr*)0);
EXPECT_EQ(ARRAY_SIZE(s1), stack.size);
EXPECT_EQ(0, internal_memcmp(stack.trace, s1, sizeof(s1)));
EXPECT_EQ(ARRAY_SIZE(array), stack.size);
EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));
}
TEST(SanitizerCommon, StackDepotSeveral) {
uptr s1[] = {1, 2, 3, 4, 7};
u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
uptr s2[] = {1, 2, 3, 4, 8, 9};
u32 i2 = StackDepotPut(s2, ARRAY_SIZE(s2));
uptr array1[] = {1, 2, 3, 4, 7};
StackTrace s1(array1, ARRAY_SIZE(array1));
u32 i1 = StackDepotPut(s1);
uptr array2[] = {1, 2, 3, 4, 8, 9};
StackTrace s2(array2, ARRAY_SIZE(array2));
u32 i2 = StackDepotPut(s2);
EXPECT_NE(i1, i2);
}
TEST(SanitizerCommon, StackDepotReverseMap) {
uptr s1[] = {1, 2, 3, 4, 5};
uptr s2[] = {7, 1, 3, 0};
uptr s3[] = {10, 2, 5, 3};
uptr s4[] = {1, 3, 2, 5};
uptr array1[] = {1, 2, 3, 4, 5};
uptr array2[] = {7, 1, 3, 0};
uptr array3[] = {10, 2, 5, 3};
uptr array4[] = {1, 3, 2, 5};
u32 ids[4] = {0};
ids[0] = StackDepotPut(s1, ARRAY_SIZE(s1));
ids[1] = StackDepotPut(s2, ARRAY_SIZE(s2));
ids[2] = StackDepotPut(s3, ARRAY_SIZE(s3));
ids[3] = StackDepotPut(s4, ARRAY_SIZE(s4));
StackTrace s1(array1, ARRAY_SIZE(array1));
StackTrace s2(array2, ARRAY_SIZE(array2));
StackTrace s3(array3, ARRAY_SIZE(array3));
StackTrace s4(array4, ARRAY_SIZE(array4));
ids[0] = StackDepotPut(s1);
ids[1] = StackDepotPut(s2);
ids[2] = StackDepotPut(s3);
ids[3] = StackDepotPut(s4);
StackDepotReverseMap map;
for (uptr i = 0; i < 4; i++) {
uptr sz_map;
const uptr *sp_map;
StackTrace stack = StackDepotGet(ids[i]);
sp_map = map.Get(ids[i], &sz_map);
EXPECT_EQ(stack.size, sz_map);
EXPECT_EQ(stack.trace, sp_map);
StackTrace from_map = map.Get(ids[i]);
EXPECT_EQ(stack.size, from_map.size);
EXPECT_EQ(stack.trace, from_map.trace);
}
}

View File

@@ -19,13 +19,13 @@ namespace __dsan {
static Context *ctx;
static u32 CurrentStackTrace(Thread *thr, uptr skip) {
BufferedStackTrace trace;
BufferedStackTrace stack;
thr->ignore_interceptors = true;
trace.Unwind(1000, 0, 0, 0, 0, 0, false);
stack.Unwind(1000, 0, 0, 0, 0, 0, false);
thr->ignore_interceptors = false;
if (trace.size <= skip)
if (stack.size <= skip)
return 0;
return StackDepotPut(trace.trace + skip, trace.size - skip);
return StackDepotPut(StackTrace(stack.trace + skip, stack.size - skip));
}
static void PrintStackTrace(Thread *thr, u32 stk) {
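The dd runtime hunk above shows the idiom for dropping the top 'skip' unwinder frames before interning: build a StackTrace view into the tail of the buffered trace. A hedged sketch of the same idiom (CollectStackId is an illustrative name; the Unwind argument list is copied from the hunk, and the dd-specific interceptor toggling is omitted):

#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

namespace __sanitizer {

static u32 CollectStackId(uptr skip) {
  BufferedStackTrace stack;
  // Args as in the hunk above: max depth, pc/bp/context, stack bounds,
  // request_fast_unwind.
  stack.Unwind(1000, 0, 0, 0, 0, 0, false);
  if (stack.size <= skip)
    return 0;
  // StackTrace is a non-owning (pointer, size) view, so skipping frames is
  // just pointer arithmetic; the depot copies the frames when it stores them.
  return StackDepotPut(StackTrace(stack.trace + skip, stack.size - skip));
}

}  // namespace __sanitizer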

View File

@@ -462,8 +462,8 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
u32 id = StackDepotPut(thr->shadow_stack,
thr->shadow_stack_pos - thr->shadow_stack);
u32 id = StackDepotPut(__sanitizer::StackTrace(
thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
if (pc != 0)
thr->shadow_stack_pos--;
return id;
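TSan builds the StackTrace directly over its per-thread shadow stack; the explicit __sanitizer:: qualification in the hunk is presumably needed to disambiguate from TSan's own stack-trace type. A hedged sketch of wrapping an existing contiguous buffer of PCs this way, without re-unwinding or copying first (InternShadowStack and the frame-range parameters are illustrative):

#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

namespace __sanitizer {

// Intern a stack that already lives in a contiguous array of PCs,
// e.g. a thread's shadow stack delimited by [frames_begin, frames_end).
static u32 InternShadowStack(const uptr *frames_begin, const uptr *frames_end) {
  return StackDepotPut(
      StackTrace(frames_begin, static_cast<uptr>(frames_end - frames_begin)));
}

}  // namespace __sanitizer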