tsan: add flag to not report races between atomic and plain memory accesses

llvm-svn: 174165
Dmitry Vyukov 2013-02-01 10:06:56 +00:00
parent 71242b064e
commit 52f0e4e1a0
3 changed files with 13 additions and 4 deletions
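For context: this flag controls whether tsan reports data races in which one of the two conflicting accesses is an atomic operation. A hedged illustration of the class of race affected (the program below is illustrative and not part of this commit; Worker and Global are made-up names): each kind of atomic operation touched in the atomics file below, load, store, RMW, and CAS, races with the plain store in main.

// Illustrative only; not from this commit. Build with -fsanitize=thread.
#include <pthread.h>

int Global;  // hypothetical shared variable

void *Worker(void *arg) {
  (void)arg;
  __atomic_load_n(&Global, __ATOMIC_RELAXED);            // -> AtomicLoad
  __atomic_store_n(&Global, 1, __ATOMIC_RELEASE);        // -> AtomicStore
  __atomic_fetch_add(&Global, 1, __ATOMIC_RELAXED);      // -> AtomicRMW
  int expected = 0;
  __atomic_compare_exchange_n(&Global, &expected, 2, 0,  // -> AtomicCAS
                              __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  return 0;
}

int main() {
  pthread_t t;
  pthread_create(&t, 0, Worker, 0);
  Global = 42;  // plain store racing with the atomics in Worker
  pthread_join(t, 0);
  return 0;
}

By default (report_atomic_races=1) tsan reports these races; running the same binary with TSAN_OPTIONS=report_atomic_races=0 suppresses only this class of reports. The value reaches InitializeFlags below via the TSAN_OPTIONS environment variable, which the runtime passes in as env.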

compiler-rt/lib/tsan/rtl/tsan_flags.cc

@@ -45,6 +45,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->report_thread_leaks = true;
   f->report_destroy_locked = true;
   f->report_signal_unsafe = true;
+  f->report_atomic_races = true;
   f->force_seq_cst_atomics = false;
   f->strip_path_prefix = "";
   f->suppressions = "";
@@ -72,6 +73,7 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
   ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
   ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
+  ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
   ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
   ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
   ParseFlag(env, &f->suppressions, "suppressions");

compiler-rt/lib/tsan/rtl/tsan_flags.h

@@ -43,6 +43,8 @@ struct Flags {
   // Report violations of async signal-safety
   // (e.g. malloc() call from a signal handler).
   bool report_signal_unsafe;
+  // Report races between atomic and plain memory accesses.
+  bool report_atomic_races;
   // If set, all atomics are effectively sequentially consistent (seq_cst),
   // regardless of what user actually specified.
   bool force_seq_cst_atomics;

compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc

@@ -244,7 +244,8 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    if (flags()->report_atomic_races)
+      MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
     return *a;
   }
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
@@ -253,6 +254,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   T v = *a;
   s->mtx.ReadUnlock();
   __sync_synchronize();
+  if (flags()->report_atomic_races)
   MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
   return v;
 }
@@ -261,7 +263,8 @@ template<typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
   CHECK(IsStoreOrder(mo));
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  if (flags()->report_atomic_races)
+    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -283,7 +286,8 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
 
 template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  if (flags()->report_atomic_races)
+    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
@@ -343,7 +347,8 @@ template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
     volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;  // Unused because llvm does not pass it yet.
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  if (flags()->report_atomic_races)
+    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
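
A closing observation on the hunks above: the flag gates only the MemoryReadAtomic/MemoryWriteAtomic shadow accesses that feed race detection. The loads and stores themselves, and the acquire/release clock operations on the slow path, run unchanged, so report_atomic_races=0 silences reports without weakening the happens-before modeling. A hedged sanity check of that property (hypothetical program, not a test from this commit; Producer, Payload, and Ready are made-up names): a correctly synchronized publish/consume pattern stays report-free under either flag value.

// Illustrative only; not from this commit. Build with -fsanitize=thread.
#include <pthread.h>

int Payload;  // hypothetical names throughout
int Ready;

void *Producer(void *arg) {
  (void)arg;
  Payload = 123;                                  // plain store, published below
  __atomic_store_n(&Ready, 1, __ATOMIC_RELEASE);  // release store: slow path, still synchronizes
  return 0;
}

int main() {
  pthread_t t;
  pthread_create(&t, 0, Producer, 0);
  while (!__atomic_load_n(&Ready, __ATOMIC_ACQUIRE)) {  // pairs with the release store
  }
  int v = Payload;  // no race: ordered after the publish by acquire/release
  (void)v;
  pthread_join(t, 0);
  return 0;
}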