diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
index 76ca8611b619..30d503def36f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
@@ -143,6 +143,18 @@ class DeadlockDetector {
     return is_reachable;
   }
 
+  // Handles the try_lock event, returns false.
+  // When a try_lock event happens (i.e. a try_lock call succeeds) we need
+  // to add this lock to the currently held locks, but we should not try to
+  // change the lock graph or to detect a cycle. We may want to investigate
+  // whether a more aggressive strategy is possible for try_lock.
+  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+    ensureCurrentEpoch(dtls);
+    uptr cur_idx = nodeToIndex(cur_node);
+    dtls->addLock(cur_idx, current_epoch_);
+    return false;
+  }
+
   // Finds a path between the lock 'cur_node' (which is currently held in dtls)
   // and some other currently held lock, returns the length of the path
   // or 0 on failure.
@@ -170,8 +182,13 @@ class DeadlockDetector {
   }
 
   uptr testOnlyGetEpoch() const { return current_epoch_; }
+  bool testOnlyHasEdge(uptr l1, uptr l2) {
+    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
+  }
   // idx1 and idx2 are raw indices to g_, not lock IDs.
-  bool testOnlyHasEdge(uptr idx1, uptr idx2) { return g_.hasEdge(idx1, idx2); }
+  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
+    return g_.hasEdge(idx1, idx2);
+  }
 
   void Print() {
     for (uptr from = 0; from < size(); from++)
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
index cc3cbabced88..49b3bf1a3502 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
@@ -301,14 +301,41 @@ void RunCorrectEpochFlush() {
   EXPECT_EQ(d.testOnlyGetEpoch(), d.size() * 2);
   d.onLock(&dtls, l0);
   d.onLock(&dtls, l1);
-  EXPECT_TRUE(d.testOnlyHasEdge(0, 1));
-  EXPECT_FALSE(d.testOnlyHasEdge(1, 0));
-  EXPECT_FALSE(d.testOnlyHasEdge(3, 0));
-  EXPECT_FALSE(d.testOnlyHasEdge(4, 0));
-  EXPECT_FALSE(d.testOnlyHasEdge(5, 0));
+  EXPECT_TRUE(d.testOnlyHasEdgeRaw(0, 1));
+  EXPECT_FALSE(d.testOnlyHasEdgeRaw(1, 0));
+  EXPECT_FALSE(d.testOnlyHasEdgeRaw(3, 0));
+  EXPECT_FALSE(d.testOnlyHasEdgeRaw(4, 0));
+  EXPECT_FALSE(d.testOnlyHasEdgeRaw(5, 0));
 }
 
 TEST(DeadlockDetector, CorrectEpochFlush) {
   RunCorrectEpochFlush<BV1>();
   RunCorrectEpochFlush<BV2>();
 }
+
+template <class BV>
+void RunTryLockTest() {
+  ScopedDD<BV> sdd;
+  DeadlockDetector<BV> &d = *sdd.dp;
+  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;
+
+  uptr l0 = d.newNode(0);
+  uptr l1 = d.newNode(0);
+  uptr l2 = d.newNode(0);
+  EXPECT_FALSE(d.onLock(&dtls, l0));
+  EXPECT_FALSE(d.onTryLock(&dtls, l1));
+  EXPECT_FALSE(d.onLock(&dtls, l2));
+  EXPECT_TRUE(d.isHeld(&dtls, l0));
+  EXPECT_TRUE(d.isHeld(&dtls, l1));
+  EXPECT_TRUE(d.isHeld(&dtls, l2));
+  EXPECT_FALSE(d.testOnlyHasEdge(l0, l1));
+  EXPECT_TRUE(d.testOnlyHasEdge(l1, l2));
+  d.onUnlock(&dtls, l0);
+  d.onUnlock(&dtls, l1);
+  d.onUnlock(&dtls, l2);
+}
+
+TEST(DeadlockDetector, TryLockTest) {
+  RunTryLockTest<BV1>();
+  RunTryLockTest<BV2>();
+}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
index ad69c01a6161..ad8dd7155ae6 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
@@ -960,7 +960,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
   if (res == EOWNERDEAD)
     MutexRepair(thr, pc, (uptr)m);
   if (res == 0 || res == EOWNERDEAD)
-    MutexLock(thr, pc, (uptr)m);
+    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
   return res;
 }
 
@@ -1004,7 +1004,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
   int res = REAL(pthread_spin_trylock)(m);
   if (res == 0) {
-    MutexLock(thr, pc, (uptr)m);
+    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
   }
   return res;
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index b7090f918358..bc02f4029f49 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -728,7 +728,8 @@ void ProcessPendingSignals(ThreadState *thr);
 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                  bool rw, bool recursive, bool linker_init);
 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
+               bool try_lock = false);
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
 void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
index 89098a885b7d..b82768413b10 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -92,7 +92,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
   DestroyAndFree(s);
 }
 
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
   Context *ctx = CTX();
   DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
   CHECK_GT(rec, 0);
@@ -128,8 +128,11 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
     Printf("ThreadSanitizer: recursive-lock\n");
   }
   // Printf("MutexLock: %zx\n", s->deadlock_detector_id);
-  bool has_deadlock =
-      ctx->dd.onLock(&thr->deadlock_detector_tls, s->deadlock_detector_id);
+  bool has_deadlock = try_lock
+      ? ctx->dd.onTryLock(&thr->deadlock_detector_tls,
+                          s->deadlock_detector_id)
+      : ctx->dd.onLock(&thr->deadlock_detector_tls,
+                       s->deadlock_detector_id);
   if (has_deadlock) {
     uptr path[10];
     uptr len = ctx->dd.findPathToHeldLock(&thr->deadlock_detector_tls,
diff --git a/compiler-rt/test/tsan/deadlock_detector_stress_test.cc b/compiler-rt/test/tsan/deadlock_detector_stress_test.cc
index 95812df02e43..b4a826e34726 100644
--- a/compiler-rt/test/tsan/deadlock_detector_stress_test.cc
+++ b/compiler-rt/test/tsan/deadlock_detector_stress_test.cc
@@ -14,6 +14,7 @@ class PaddedLock {
   }
   void lock() { assert(0 == pthread_mutex_lock(&mu_)); }
   void unlock() { assert(0 == pthread_mutex_unlock(&mu_)); }
+  bool try_lock() { return 0 == pthread_mutex_trylock(&mu_); }
 
  private:
   pthread_mutex_t mu_;
@@ -38,6 +39,11 @@ class LockTest {
     return &locks_[i];
   }
 
+  bool T(size_t i) {
+    assert(i < n_);
+    return locks_[i].try_lock();
+  }
+
   // Simple lock order inversion.
   void Test1() {
     fprintf(stderr, "Starting Test1\n");
@@ -114,6 +120,34 @@ class LockTest {
                &LockTest::Lock1_Loop_2);
   }
 
+  void Test7() {
+    fprintf(stderr, "Starting Test7\n");
+    // CHECK: Starting Test7
+    L(0); T(1); U(1); U(0);
+    T(1); L(0); U(1); U(0);
+    // CHECK-NOT: WARNING: ThreadSanitizer:
+    fprintf(stderr, "No cycle: 0=>1\n");
+    // CHECK: No cycle: 0=>1
+
+    T(2); L(3); U(3); U(2);
+    L(3); T(2); U(3); U(2);
+    // CHECK-NOT: WARNING: ThreadSanitizer:
+    fprintf(stderr, "No cycle: 2=>3\n");
+    // CHECK: No cycle: 2=>3
+
+    T(4); L(5); U(4); U(5);
+    L(5); L(4); U(4); U(5);
+    // CHECK: WARNING: ThreadSanitizer: lock-order-inversion
+    fprintf(stderr, "Have cycle: 4=>5\n");
+    // CHECK: Have cycle: 4=>5
+
+    L(7); L(6); U(6); U(7);
+    T(6); L(7); U(6); U(7);
+    // CHECK: WARNING: ThreadSanitizer: lock-order-inversion
+    fprintf(stderr, "Have cycle: 6=>7\n");
+    // CHECK: Have cycle: 6=>7
+  }
+
  private:
   void Lock2(size_t l1, size_t l2) { L(l1); L(l2); U(l2); U(l1); }
   void Lock_0_1() { Lock2(0, 1); }
@@ -177,6 +211,7 @@ int main() {
   { LockTest t(5); t.Test4(); }
   { LockTest t(5); t.Test5(); }
   { LockTest t(5); t.Test6(); }
+  { LockTest t(10); t.Test7(); }
   fprintf(stderr, "DONE\n");
   // CHECK: DONE
 }
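
Note (editorial, not part of the patch): the onTryLock comment above explains why a successful try_lock is only added to the set of currently held locks and contributes no edges to the lock-order graph: a try_lock never blocks, so by itself it cannot complete a deadlock cycle, and recording an edge for it would produce false lock-order-inversion reports. The sketch below is a minimal standalone illustration of the "no cycle" cases exercised by Test7; the file name, mutex names, and build/run lines are illustrative assumptions, not part of the change.

  // try_lock_no_edge.cc -- illustrative sketch only (assumed file name).
  // Build (assumption): clang++ -fsanitize=thread -g try_lock_no_edge.cc -o t
  // Run (assumption):   TSAN_OPTIONS=detect_deadlocks=1 ./t
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

  int main() {
    // Order 1: a is held, then b is acquired via try_lock.  With onTryLock
    // no a=>b edge is recorded, because the trylock could not have blocked.
    pthread_mutex_lock(&a);
    if (pthread_mutex_trylock(&b) == 0)
      pthread_mutex_unlock(&b);
    pthread_mutex_unlock(&a);

    // Order 2: the reverse order; the blocking lock(a) while b is held
    // records only a b=>a edge.
    if (pthread_mutex_trylock(&b) == 0) {
      pthread_mutex_lock(&a);
      pthread_mutex_unlock(&a);
      pthread_mutex_unlock(&b);
    }

    // The graph ends up with at most the single edge b=>a, so no
    // lock-order-inversion is reported.  Treating the trylocks as ordinary
    // locks would also add a=>b and flag a false cycle, as in Test7.
    fprintf(stderr, "done\n");
    return 0;
  }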