tsan: increase max shadow stack size + reduce memory consumption at the same time (by not memorizing full stacks in traces)

llvm-svn: 163322
This commit is contained in:
Dmitry Vyukov 2012-09-06 15:18:14 +00:00
parent fb3cdd83b0
commit c87e7280b8
7 changed files with 99 additions and 10 deletions

View File

@ -29,7 +29,8 @@ const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 43;
#ifndef TSAN_GO
const int kShadowStackSize = 1024;
const int kShadowStackSize = 4*1024;
const int kTraceStackSize = 256;
#endif
#ifdef TSAN_SHADOW_COUNT

View File

@ -146,7 +146,7 @@ void PrintReport(const ReportDesc *rep) {
#else
static void PrintStack(const ReportStack *ent) {
void PrintStack(const ReportStack *ent) {
for (int i = 0; ent; ent = ent->next, i++) {
TsanPrintf(" %s()\n %s:%d +0x%zx\n",
ent->func, ent->file, ent->line, (void*)ent->offset);

View File

@ -230,6 +230,7 @@ int Finalize(ThreadState *thr) {
return failed ? flags()->exitcode : 0;
}
#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
@ -243,6 +244,7 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos--;
return id;
}
#endif
void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;

View File

@ -85,9 +85,9 @@ static void StackStripMain(ReportStack *stack) {
} else if (last || last2) {
// Ensure that we recovered stack completely. Trimmed stack
// can actually happen if we do not instrument some code,
// so it's only a DCHECK. However we must try hard to not miss it
// so it's only a debug print. However we must try hard to not miss it
// due to our fault.
TsanPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
}
#else
if (last && 0 == internal_strcmp(last, "schedunlock"))
@ -163,6 +163,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->stack = SymbolizeStack(tctx->creation_stack);
}
#ifndef TSAN_GO
static ThreadContext *FindThread(int unique_id) {
CTX()->thread_mtx.CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
@ -173,6 +174,7 @@ static ThreadContext *FindThread(int unique_id) {
}
return 0;
}
#endif
void ScopedReport::AddMutex(const SyncVar *s) {
void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
@ -230,6 +232,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
}
}
#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
uptr ssz = 0;
const uptr *stack = StackDepotGet(stack_id, &ssz);
@ -239,6 +242,7 @@ void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStack(trace);
}
}
#endif
const ReportDesc *ScopedReport::GetReport() const {
return rep_;
@ -285,8 +289,6 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
} else if (typ == EventTypeFuncEnter) {
stack[pos++] = pc;
} else if (typ == EventTypeFuncExit) {
// Since we have full stacks, this should never happen.
DCHECK_GT(pos, 0);
if (pos > 0)
pos--;
}

View File

@ -236,15 +236,19 @@ void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
n_ = thr->shadow_stack_pos - thr->shadow_stack;
if (n_ + !!toppc == 0)
return;
uptr start = 0;
if (c_) {
CHECK_NE(s_, 0);
CHECK_LE(n_ + !!toppc, c_);
if (n_ + !!toppc > c_) {
start = n_ - c_ + !!toppc;
n_ = c_ - !!toppc;
}
} else {
s_ = (uptr*)internal_alloc(MBlockStackTrace,
(n_ + !!toppc) * sizeof(s_[0]));
}
for (uptr i = 0; i < n_; i++)
s_[i] = thr->shadow_stack[i];
s_[i] = thr->shadow_stack[start + i];
if (toppc) {
s_[n_] = toppc;
n_++;

View File

@ -47,12 +47,12 @@ struct TraceHeader {
StackTrace stack0; // Start stack for the trace.
u64 epoch0; // Start epoch for the trace.
#ifndef TSAN_GO
uptr stack0buf[kShadowStackSize];
uptr stack0buf[kTraceStackSize];
#endif
TraceHeader()
#ifndef TSAN_GO
: stack0(stack0buf, kShadowStackSize)
: stack0(stack0buf, kTraceStackSize)
#else
: stack0()
#endif

View File

@ -0,0 +1,80 @@
//===-- tsan_stack_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <string.h>
namespace __tsan {
// Drives StackTrace::ObtainCurrent() against a fabricated ThreadState and
// checks three scenarios: empty shadow stack (with and without an explicit
// top pc) and a populated shadow stack (with and without an explicit top pc).
// Frames pushed onto the shadow stack here are intentionally not popped, so
// the later expectations observe the accumulated state.
static void TestStackTrace(StackTrace *trace) {
  ThreadState thread(0, 0, 0, 0, 0, 0, 0, 0);

  // Empty shadow stack, no top pc -> empty trace.
  trace->ObtainCurrent(&thread, 0);
  EXPECT_EQ(trace->Size(), static_cast<uptr>(0));

  // Empty shadow stack, explicit top pc -> single-entry trace.
  trace->ObtainCurrent(&thread, 42);
  EXPECT_EQ(trace->Size(), static_cast<uptr>(1));
  EXPECT_EQ(trace->Get(0), static_cast<uptr>(42));

  // Push two frames; they must come back in push order.
  *thread.shadow_stack_pos++ = 100;
  *thread.shadow_stack_pos++ = 101;
  trace->ObtainCurrent(&thread, 0);
  EXPECT_EQ(trace->Size(), static_cast<uptr>(2));
  EXPECT_EQ(trace->Get(0), static_cast<uptr>(100));
  EXPECT_EQ(trace->Get(1), static_cast<uptr>(101));

  // Same two frames plus an explicit top pc appended last.
  trace->ObtainCurrent(&thread, 42);
  EXPECT_EQ(trace->Size(), static_cast<uptr>(3));
  EXPECT_EQ(trace->Get(0), static_cast<uptr>(100));
  EXPECT_EQ(trace->Get(1), static_cast<uptr>(101));
  EXPECT_EQ(trace->Get(2), static_cast<uptr>(42));
}
// Trace with internally (heap) allocated storage.
TEST(StackTrace, Basic) {
  ScopedInRtl in_rtl;
  StackTrace dynamic_trace;
  TestStackTrace(&dynamic_trace);
}
// Trace backed by caller-provided storage, once with spare capacity (10)
// and once with just enough capacity (3) for the largest trace produced
// by TestStackTrace().
TEST(StackTrace, StaticBasic) {
  ScopedInRtl in_rtl;
  uptr storage[10];

  StackTrace roomy(storage, 10);
  TestStackTrace(&roomy);

  StackTrace tight(storage, 3);
  TestStackTrace(&tight);
}
// When the shadow stack holds more frames than the static buffer can take,
// ObtainCurrent() must keep the newest frames and drop the oldest ones.
TEST(StackTrace, StaticTrim) {
  ScopedInRtl in_rtl;
  uptr storage[2];
  StackTrace trace(storage, 2);

  ThreadState thread(0, 0, 0, 0, 0, 0, 0, 0);
  *thread.shadow_stack_pos++ = 100;
  *thread.shadow_stack_pos++ = 101;
  *thread.shadow_stack_pos++ = 102;

  // Capacity 2 < 3 frames: the oldest frame (100) is trimmed away.
  trace.ObtainCurrent(&thread, 0);
  EXPECT_EQ(trace.Size(), static_cast<uptr>(2));
  EXPECT_EQ(trace.Get(0), static_cast<uptr>(101));
  EXPECT_EQ(trace.Get(1), static_cast<uptr>(102));

  // An explicit top pc consumes one slot, so only the newest shadow-stack
  // frame (102) survives alongside it.
  trace.ObtainCurrent(&thread, 42);
  EXPECT_EQ(trace.Size(), static_cast<uptr>(2));
  EXPECT_EQ(trace.Get(0), static_cast<uptr>(102));
  EXPECT_EQ(trace.Get(1), static_cast<uptr>(42));
}
} // namespace __tsan