[sanitizer] make sure the deadlock detector survives the change of epochs; add a test and a comment
llvm-svn: 201572
commit bd86a29a2a
parent b036778bdb
@@ -12,6 +12,13 @@
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed amount of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximal number of locks is reached the entire graph
// is flushed and the new lock epoch is started. The node ids from the old
// epochs can not be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//
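The new comment block describes the epoch scheme in prose: the graph has a fixed capacity, a flush starts a new epoch, and ids handed out in older epochs become unusable. A rough standalone illustration of that idea (hypothetical EpochedIds class and kSize constant, not the sanitizer's real DeadlockDetector):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical sketch: ids have the form epoch + index, and a flush bumps the
// epoch so every outstanding id from an older epoch goes stale at once.
class EpochedIds {
 public:
  static const size_t kSize = 4;  // fixed number of simultaneously live ids

  // Returns a fresh id; flushes everything and starts a new epoch when full.
  uint64_t newNode() {
    if (n_used_ == kSize) {
      current_epoch_ += kSize;  // flush: ids from older epochs are now stale
      n_used_ = 0;
    }
    return current_epoch_ + n_used_++;
  }

  // The only query that remains meaningful for ids from older epochs.
  bool nodeBelongsToCurrentEpoch(uint64_t id) const {
    return id / kSize * kSize == current_epoch_;
  }

 private:
  uint64_t current_epoch_ = kSize;  // start above 0 so ids are never 0
  size_t n_used_ = 0;
};

int main() {
  EpochedIds ids;
  uint64_t first = ids.newNode();
  assert(ids.nodeBelongsToCurrentEpoch(first));
  for (size_t i = 1; i < EpochedIds::kSize + 1; i++) ids.newNode();  // force a flush
  assert(!ids.nodeBelongsToCurrentEpoch(first));  // stale id from an old epoch
}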
@@ -37,6 +44,7 @@ class DeadlockDetectorTLS {

  void addLock(uptr lock_id, uptr current_epoch) {
    // Printf("addLock: %zx %zx\n", lock_id, current_epoch);
    CHECK_LE(epoch_, current_epoch);
    if (current_epoch != epoch_) {
      bv_.clear();
      epoch_ = current_epoch;
@@ -46,11 +54,12 @@ class DeadlockDetectorTLS {

  void removeLock(uptr lock_id, uptr current_epoch) {
    // Printf("remLock: %zx %zx\n", lock_id, current_epoch);
    CHECK_LE(epoch_, current_epoch);
    if (current_epoch != epoch_) {
      bv_.clear();
      epoch_ = current_epoch;
    }
    CHECK(bv_.clearBit(lock_id));
    bv_.clearBit(lock_id);  // May already be cleared due to epoch update.
  }

  const BV &getLocks() const { return bv_; }
@@ -107,6 +116,10 @@ class DeadlockDetector {
  // Get data associated with the node created by newNode().
  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }

  bool nodeBelongsToCurrentEpoch(uptr node) {
    return node && (node / size() * size()) == current_epoch_;
  }

  void removeNode(uptr node) {
    uptr idx = nodeToIndex(node);
    CHECK(!available_nodes_.getBit(idx));
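nodeBelongsToCurrentEpoch() relies on node ids being laid out as epoch + index, with each epoch a multiple of size(); rounding an id down to a multiple of size() therefore recovers the epoch it was allocated in, and a zero id (meaning "no node assigned") never matches. A worked example, assuming a graph size of 1024:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t size = 1024;
  uint64_t current_epoch = 3 * size;     // third epoch
  uint64_t fresh = current_epoch + 17;   // node allocated in this epoch
  uint64_t stale = 2 * size + 17;        // same index, but from the previous epoch

  assert(fresh / size * size == current_epoch);  // belongs to the current epoch
  assert(stale / size * size != current_epoch);  // stale: the epoch has moved on
}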
@@ -25,7 +25,7 @@ namespace __tsan {
static __sanitizer::DeadlockDetector<DDBV> g_deadlock_detector;

static void EnsureDeadlockDetectorID(ThreadState *thr, SyncVar *s) {
  if (!s->deadlock_detector_id)
  if (!g_deadlock_detector.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
    s->deadlock_detector_id =
        g_deadlock_detector.newNode(reinterpret_cast<uptr>(s));
}
@@ -62,7 +62,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  if (s == 0)
    return;
  if (common_flags()->detect_deadlocks) {
    if (s->deadlock_detector_id)
    if (g_deadlock_detector.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
      g_deadlock_detector.removeNode(s->deadlock_detector_id);
    s->deadlock_detector_id = 0;
  }
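Both call sites stop trusting a cached deadlock_detector_id on its own: a nonzero id may still be stale after an epoch flush, so they first ask the detector whether the id belongs to the current epoch before reusing it (on lock) or removing it (on destroy). A minimal sketch of that guard pattern (hypothetical Detector and Mutex types; allocation and removal details elided):

#include <cstdint>

struct Detector {
  uint64_t current_epoch = 1024;
  uint64_t next = 1024;
  bool nodeBelongsToCurrentEpoch(uint64_t id) const {
    return id && id / 1024 * 1024 == current_epoch;
  }
  uint64_t newNode() { return next++; }  // allocation details elided
  void removeNode(uint64_t) {}           // removal details elided
};

struct Mutex {
  uint64_t node_id = 0;  // 0 means "no detector node assigned yet"
};

void EnsureId(Detector &d, Mutex &m) {
  // A zero id and an id from an old epoch are handled the same way.
  if (!d.nodeBelongsToCurrentEpoch(m.node_id))
    m.node_id = d.newNode();
}

void OnDestroy(Detector &d, Mutex &m) {
  // Only remove the node if the id is still meaningful in this epoch.
  if (d.nodeBelongsToCurrentEpoch(m.node_id))
    d.removeNode(m.node_id);
  m.node_id = 0;
}

int main() {
  Detector d;
  Mutex m;
  EnsureId(d, m);   // allocates a node the first time
  OnDestroy(d, m);  // removes it while the epoch is unchanged
}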
@@ -121,12 +121,11 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks) {
    EnsureDeadlockDetectorID(thr, s);
    // Printf("MutexLock: %zx\n", s->deadlock_detector_id);
    bool has_deadlock = g_deadlock_detector.onLock(&thr->deadlock_detector_tls,
                                                   s->deadlock_detector_id);
    Printf("MutexLock: %zx;%s\n", s->deadlock_detector_id,
           has_deadlock
               ? " ThreadSanitizer: lock-order-inversion (potential deadlock)"
               : "");
    if (has_deadlock)
      Printf("ThreadSanitizer: lock-order-inversion (potential deadlock)\n");
  }
  s->mtx.Unlock();
}
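onLock() performs the check described in the header comment: when a thread acquires a new lock while holding others, edges held -> new are recorded, and a potential deadlock is reported if one of the held locks is already reachable from the new one. A toy sketch of that idea using a plain adjacency map (hypothetical helpers, not the sanitizer's bit-vector graph):

#include <cstdio>
#include <map>
#include <set>
#include <vector>

// lock -> set of locks that have been acquired while it was held
static std::map<int, std::set<int>> g_succ;

static bool reachable(int from, int target, std::set<int> &seen) {
  if (from == target) return true;
  if (!seen.insert(from).second) return false;  // already visited
  for (int next : g_succ[from])
    if (reachable(next, target, seen)) return true;
  return false;
}

// Returns true if acquiring `lock` while holding `held` closes a cycle.
static bool onLock(const std::vector<int> &held, int lock) {
  bool has_deadlock = false;
  for (int h : held) {
    std::set<int> seen;
    if (reachable(lock, h, seen)) has_deadlock = true;
    g_succ[h].insert(lock);  // record the edge h -> lock
  }
  return has_deadlock;
}

int main() {
  onLock({}, 0); onLock({0}, 1);  // one thread acquires 0 then 1
  if (onLock({1}, 0))             // later, 1 is held while 0 is acquired
    std::printf("lock-order-inversion (potential deadlock)\n");
}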
@@ -60,16 +60,38 @@ class LockTest {
    fprintf(stderr, "Starting Test3\n");
    // CHECK: Starting Test3
    L(0); L(1); U(0); U(1);
    L(2);
    CreateAndDestroyManyLocks();
    U(2);
    L(1); L(0); U(0); U(1);
    // CHECK: ThreadSanitizer: lock-order-inversion (potential deadlock)
    // CHECK-NOT: ThreadSanitizer:
  }

  // lock l0=>l1; then create and use lots of locks; then lock l1=>l0.
  // The deadlock epoch should have changed and we should not report anything.
  void Test4() {
    fprintf(stderr, "Starting Test4\n");
    // CHECK: Starting Test4
    L(0); L(1); U(0); U(1);
    L(2);
    CreateLockUnlockAndDestroyManyLocks();
    U(2);
    L(1); L(0); U(0); U(1);
    // CHECK-NOT: ThreadSanitizer:
  }

 private:
  void CreateAndDestroyManyLocks() {
    PaddedLock create_many_locks_but_never_acquire[kDeadlockGraphSize];
  }
  void CreateLockUnlockAndDestroyManyLocks() {
    PaddedLock many_locks[kDeadlockGraphSize];
    for (size_t i = 0; i < kDeadlockGraphSize; i++) {
      many_locks[i].lock();
      many_locks[i].unlock();
    }
  }
  static const size_t kDeadlockGraphSize = 4096;
  size_t n_;
  PaddedLock *locks_;
@@ -79,6 +101,7 @@ int main() {
  { LockTest t(5); t.Test1(); }
  { LockTest t(5); t.Test2(); }
  { LockTest t(5); t.Test3(); }
  { LockTest t(5); t.Test4(); }
  fprintf(stderr, "DONE\n");
  // CHECK: DONE
}