[tsan] add coarse-grained lock around the DeadlockDetector. We can do better than that, but that's a start.

llvm-svn: 201861
This commit is contained in:
Kostya Serebryany 2014-02-21 15:07:18 +00:00
parent 030237d0ff
commit 0548c79859
7 changed files with 74 additions and 25 deletions

View File

@ -31,6 +31,7 @@ enum MutexType {
MutexTypeAtExit, MutexTypeAtExit,
MutexTypeMBlock, MutexTypeMBlock,
MutexTypeJavaMBlock, MutexTypeJavaMBlock,
MutexTypeDeadlockDetector,
// This must be the last. // This must be the last.
MutexTypeCount MutexTypeCount

View File

@ -82,7 +82,9 @@ Context::Context()
CreateThreadContext, kMaxTid, kThreadQuarantineSize)) CreateThreadContext, kMaxTid, kThreadQuarantineSize))
, racy_stacks(MBlockRacyStacks) , racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses) , racy_addresses(MBlockRacyAddresses)
, fired_suppressions(8) { , fired_suppressions(8)
, dd_mtx(MutexTypeDeadlockDetector, StatMtxDeadlockDetector) {
dd.clear();
} }
// The objects are allocated in TLS, so one may rely on zero-initialization. // The objects are allocated in TLS, so one may rely on zero-initialization.

View File

@ -549,6 +549,9 @@ struct Context {
// Number of fired suppressions may be large enough. // Number of fired suppressions may be large enough.
InternalMmapVector<FiredSuppression> fired_suppressions; InternalMmapVector<FiredSuppression> fired_suppressions;
__sanitizer::DeadlockDetector<DDBV> dd;
Mutex dd_mtx;
Flags flags; Flags flags;
u64 stat[StatCnt]; u64 stat[StatCnt];

View File

@ -22,11 +22,10 @@
namespace __tsan { namespace __tsan {
static __sanitizer::DeadlockDetector<DDBV> g_dd;
static void EnsureDeadlockDetectorID(ThreadState *thr, SyncVar *s) { static void EnsureDeadlockDetectorID(Context *ctx, SyncVar *s) {
if (!g_dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id)) if (!ctx->dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
s->deadlock_detector_id = g_dd.newNode(reinterpret_cast<uptr>(s)); s->deadlock_detector_id = ctx->dd.newNode(reinterpret_cast<uptr>(s));
} }
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
@ -61,8 +60,9 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
if (s == 0) if (s == 0)
return; return;
if (common_flags()->detect_deadlocks) { if (common_flags()->detect_deadlocks) {
if (g_dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id)) Lock lk(&ctx->dd_mtx);
g_dd.removeNode(s->deadlock_detector_id); if (ctx->dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
ctx->dd.removeNode(s->deadlock_detector_id);
s->deadlock_detector_id = 0; s->deadlock_detector_id = 0;
} }
if (IsAppMem(addr)) { if (IsAppMem(addr)) {
@ -92,11 +92,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
} }
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) { void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
Context *ctx = CTX();
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec); DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0); CHECK_GT(rec, 0);
if (IsAppMem(addr)) if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1); MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch(); thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
if (s->owner_tid == SyncVar::kInvalidTid) { if (s->owner_tid == SyncVar::kInvalidTid) {
@ -119,24 +120,25 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
s->recursion += rec; s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
if (common_flags()->detect_deadlocks) { if (common_flags()->detect_deadlocks) {
EnsureDeadlockDetectorID(thr, s); Lock lk(&ctx->dd_mtx);
if (g_dd.isHeld(&thr->deadlock_detector_tls, s->deadlock_detector_id)) { EnsureDeadlockDetectorID(ctx, s);
if (ctx->dd.isHeld(&thr->deadlock_detector_tls, s->deadlock_detector_id)) {
// FIXME: add tests, handle the real recursive locks. // FIXME: add tests, handle the real recursive locks.
Printf("ThreadSanitizer: reursive-lock\n"); Printf("ThreadSanitizer: reursive-lock\n");
} }
// Printf("MutexLock: %zx\n", s->deadlock_detector_id); // Printf("MutexLock: %zx\n", s->deadlock_detector_id);
bool has_deadlock = bool has_deadlock =
g_dd.onLock(&thr->deadlock_detector_tls, s->deadlock_detector_id); ctx->dd.onLock(&thr->deadlock_detector_tls, s->deadlock_detector_id);
if (has_deadlock) { if (has_deadlock) {
uptr path[10]; uptr path[10];
uptr len = g_dd.findPathToHeldLock(&thr->deadlock_detector_tls, uptr len = ctx->dd.findPathToHeldLock(&thr->deadlock_detector_tls,
s->deadlock_detector_id, path, s->deadlock_detector_id, path,
ARRAY_SIZE(path)); ARRAY_SIZE(path));
CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that. CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
ThreadRegistryLock l(CTX()->thread_registry); ThreadRegistryLock l(CTX()->thread_registry);
ScopedReport rep(ReportTypeDeadlock); ScopedReport rep(ReportTypeDeadlock);
for (uptr i = 0; i < len; i++) for (uptr i = 0; i < len; i++)
rep.AddMutex(reinterpret_cast<SyncVar*>(g_dd.getData(path[i]))); rep.AddMutex(reinterpret_cast<SyncVar*>(ctx->dd.getData(path[i])));
StackTrace trace; StackTrace trace;
trace.ObtainCurrent(thr, pc); trace.ObtainCurrent(thr, pc);
rep.AddStack(&trace); rep.AddStack(&trace);
@ -147,10 +149,11 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
} }
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
Context *ctx = CTX();
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all); DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr)) if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1); MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch(); thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0; int rec = 0;
@ -180,9 +183,10 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
} }
thr->mset.Del(s->GetId(), true); thr->mset.Del(s->GetId(), true);
if (common_flags()->detect_deadlocks) { if (common_flags()->detect_deadlocks) {
EnsureDeadlockDetectorID(thr, s); Lock lk(&ctx->dd_mtx);
EnsureDeadlockDetectorID(ctx, s);
// Printf("MutexUnlock: %zx\n", s->deadlock_detector_id); // Printf("MutexUnlock: %zx\n", s->deadlock_detector_id);
g_dd.onUnlock(&thr->deadlock_detector_tls, ctx->dd.onUnlock(&thr->deadlock_detector_tls,
s->deadlock_detector_id); s->deadlock_detector_id);
} }
s->mtx.Unlock(); s->mtx.Unlock();

View File

@ -145,6 +145,7 @@ void StatOutput(u64 *stat) {
name[StatMtxAnnotations] = " Annotations "; name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock "; name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock "; name[StatMtxJavaMBlock] = " JavaMBlock ";
name[StatMtxDeadlockDetector] = " DeadlockDetector ";
name[StatMtxFD] = " FD "; name[StatMtxFD] = " FD ";
Printf("Statistics:\n"); Printf("Statistics:\n");

View File

@ -143,6 +143,7 @@ enum StatType {
StatMtxAtExit, StatMtxAtExit,
StatMtxMBlock, StatMtxMBlock,
StatMtxJavaMBlock, StatMtxJavaMBlock,
StatMtxDeadlockDetector,
StatMtxFD, StatMtxFD,
// This must be the last. // This must be the last.

View File

@ -44,8 +44,8 @@ class LockTest {
// CHECK: Starting Test1 // CHECK: Starting Test1
fprintf(stderr, "Expecting lock inversion: %p %p\n", A(0), A(1)); fprintf(stderr, "Expecting lock inversion: %p %p\n", A(0), A(1));
// CHECK: Expecting lock inversion: [[A1:0x[a-f0-9]*]] [[A2:0x[a-f0-9]*]] // CHECK: Expecting lock inversion: [[A1:0x[a-f0-9]*]] [[A2:0x[a-f0-9]*]]
L(0); L(1); U(0); U(1); Lock_0_1();
L(1); L(0); U(0); U(1); Lock_1_0();
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) // CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock)
// CHECK: path: [[M1:M[0-9]+]] => [[M2:M[0-9]+]] => [[M1]] // CHECK: path: [[M1:M[0-9]+]] => [[M2:M[0-9]+]] => [[M1]]
// CHECK: Mutex [[M1]] ([[A1]]) created at: // CHECK: Mutex [[M1]] ([[A1]]) created at:
@ -59,9 +59,9 @@ class LockTest {
// CHECK: Starting Test2 // CHECK: Starting Test2
fprintf(stderr, "Expecting lock inversion: %p %p %p\n", A(0), A(1), A(2)); fprintf(stderr, "Expecting lock inversion: %p %p %p\n", A(0), A(1), A(2));
// CHECK: Expecting lock inversion: [[A1:0x[a-f0-9]*]] [[A2:0x[a-f0-9]*]] [[A3:0x[a-f0-9]*]] // CHECK: Expecting lock inversion: [[A1:0x[a-f0-9]*]] [[A2:0x[a-f0-9]*]] [[A3:0x[a-f0-9]*]]
L(0); L(1); U(0); U(1); Lock2(0, 1);
L(1); L(2); U(1); U(2); Lock2(1, 2);
L(2); L(0); U(0); U(2); Lock2(2, 0);
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) // CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock)
// CHECK: path: [[M1:M[0-9]+]] => [[M2:M[0-9]+]] => [[M3:M[0-9]+]] => [[M1]] // CHECK: path: [[M1:M[0-9]+]] => [[M2:M[0-9]+]] => [[M3:M[0-9]+]] => [[M1]]
// CHECK: Mutex [[M1]] ([[A1]]) created at: // CHECK: Mutex [[M1]] ([[A1]]) created at:
@ -76,11 +76,11 @@ class LockTest {
void Test3() { void Test3() {
fprintf(stderr, "Starting Test3\n"); fprintf(stderr, "Starting Test3\n");
// CHECK: Starting Test3 // CHECK: Starting Test3
L(0); L(1); U(0); U(1); Lock_0_1();
L(2); L(2);
CreateAndDestroyManyLocks(); CreateAndDestroyManyLocks();
U(2); U(2);
L(1); L(0); U(0); U(1); Lock_1_0();
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) // CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock)
// CHECK-NOT: WARNING: ThreadSanitizer: // CHECK-NOT: WARNING: ThreadSanitizer:
} }
@ -90,15 +90,27 @@ class LockTest {
void Test4() { void Test4() {
fprintf(stderr, "Starting Test4\n"); fprintf(stderr, "Starting Test4\n");
// CHECK: Starting Test4 // CHECK: Starting Test4
L(0); L(1); U(0); U(1); Lock_0_1();
L(2); L(2);
CreateLockUnlockAndDestroyManyLocks(); CreateLockUnlockAndDestroyManyLocks();
U(2); U(2);
L(1); L(0); U(0); U(1); Lock_1_0();
// CHECK-NOT: WARNING: ThreadSanitizer:
}
void Test5() {
fprintf(stderr, "Starting Test5\n");
// CHECK: Starting Test5
RunThreads(&LockTest::Lock_0_1, &LockTest::Lock_1_0);
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion
// CHECK-NOT: WARNING: ThreadSanitizer: // CHECK-NOT: WARNING: ThreadSanitizer:
} }
private: private:
// Acquire mutex l1, then l2; release in the reverse order of
// acquisition (l2 first). Establishes the lock order l1 -> l2 in the
// deadlock detector's graph.
void Lock2(size_t l1, size_t l2) {
  L(l1);
  L(l2);
  U(l2);
  U(l1);
}
// Lock mutexes in the order 0 -> 1 (one half of the inversion pair).
void Lock_0_1() { Lock2(0, 1); }
// Lock mutexes in the order 1 -> 0 (the opposite half of the
// inversion pair; combined with Lock_0_1 it forms a lock-order cycle).
void Lock_1_0() { Lock2(1, 0); }
void CreateAndDestroyManyLocks() { void CreateAndDestroyManyLocks() {
PaddedLock create_many_locks_but_never_acquire[kDeadlockGraphSize]; PaddedLock create_many_locks_but_never_acquire[kDeadlockGraphSize];
} }
@ -109,6 +121,30 @@ class LockTest {
many_locks[i].unlock(); many_locks[i].unlock();
} }
} }
// Bundles a LockTest instance with a pointer-to-member function so
// that a pthread start routine (which only receives a single void*)
// can invoke the member function on that instance.
struct CB {
void (LockTest::*f)();  // member function to run on the thread
LockTest *lt;           // object to run it on
};
// Thread function with CB.
static void *Thread(void *param) {
CB *cb = (CB*)param;
(cb->lt->*cb->f)();
return NULL;
}
// Spawns two threads, one per member function, and blocks until both
// have finished. Used to exercise the deadlock detector concurrently.
void RunThreads(void (LockTest::*f1)(), void (LockTest::*f2)()) {
  const int kNumThreads = 2;
  // Callback slots must outlive the threads that read them, so they
  // live on this stack frame until after both joins complete.
  CB callbacks[kNumThreads] = {{f1, this}, {f2, this}};
  pthread_t threads[kNumThreads];
  for (int i = 0; i < kNumThreads; i++)
    pthread_create(&threads[i], 0, Thread, &callbacks[i]);
  for (int i = 0; i < kNumThreads; i++)
    pthread_join(threads[i], 0);
}
static const size_t kDeadlockGraphSize = 4096; static const size_t kDeadlockGraphSize = 4096;
size_t n_; size_t n_;
PaddedLock *locks_; PaddedLock *locks_;
@ -119,6 +155,7 @@ int main() {
{ LockTest t(5); t.Test2(); } { LockTest t(5); t.Test2(); }
{ LockTest t(5); t.Test3(); } { LockTest t(5); t.Test3(); }
{ LockTest t(5); t.Test4(); } { LockTest t(5); t.Test4(); }
{ LockTest t(5); t.Test5(); }
fprintf(stderr, "DONE\n"); fprintf(stderr, "DONE\n");
// CHECK: DONE // CHECK: DONE
} }