tsan: speed up race deduplication
Race deduplication code has proved to be a performance bottleneck in the past when suppressions/annotations are used, or when some races are simply left unaddressed, and we still get user complaints about this:
https://groups.google.com/forum/#!topic/thread-sanitizer/hB0WyiTI4e4

ReportRace already has several layers of caching for racy pcs/addresses to make deduplication faster. However, ReportRace still takes global mutexes (ThreadRegistry and ReportMutex) during deduplication and also calls mmap/munmap (which take a process-wide semaphore in the kernel), which makes deduplication non-scalable.

This patch moves race deduplication outside of the global mutexes and also removes all mmap/munmap calls. As a result, race_stress.cc with 100 threads and 10000 iterations becomes 30x faster:

before:
real    0m21.673s
user    0m5.932s
sys     0m34.885s

after:
real    0m0.720s
user    0m23.646s
sys     0m1.254s

http://reviews.llvm.org/D12554

llvm-svn: 246758
commit 3464dac0ca
parent b500101e1c
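The recurring pattern in the diff below: counters that used to be bumped under a global lock (Suppression::hit_count, ExpectRace::hitcount/addcount) become relaxed atomics, and the deduplication lookups (racy stacks/addresses, fired suppressions) move under new, dedicated mutexes taken in read mode on the hot path, so the global ThreadRegistry/report locks are only acquired once a report is actually going to be produced. What follows is a minimal standalone C++17 sketch of that pattern, not the runtime's own code: std::shared_mutex and std::atomic stand in for the sanitizer's internal ReadLock/Lock and atomic_uint32_t, and the two free functions are hypothetical simplifications of the fired-suppression handling.

// Standalone sketch (C++17) of the lock/atomic pattern this patch applies.
// Not the sanitizer runtime's code: std::shared_mutex and std::atomic are
// stand-ins for the runtime's ReadLock/Lock and atomic_uint32_t.
#include <atomic>
#include <cstdint>
#include <shared_mutex>
#include <vector>

struct FiredSuppression {
  int type;
  uintptr_t pc_or_addr;
  std::atomic<unsigned> *hit_count;  // statistics only, so relaxed is enough
};

static std::shared_mutex fired_suppressions_mtx;
static std::vector<FiredSuppression> fired_suppressions;

// Hot path: many reporting threads may scan concurrently under a read lock,
// and bump the matched suppression's counter without any lock at all.
bool IsFiredSuppression(int type, uintptr_t pc_or_addr) {
  std::shared_lock<std::shared_mutex> lock(fired_suppressions_mtx);
  for (const FiredSuppression &s : fired_suppressions) {
    if (s.type == type && s.pc_or_addr == pc_or_addr) {
      s.hit_count->fetch_add(1, std::memory_order_relaxed);
      return true;
    }
  }
  return false;
}

// Cold path: only a newly fired suppression takes the write lock.
void AddFiredSuppression(const FiredSuppression &s) {
  std::unique_lock<std::shared_mutex> lock(fired_suppressions_mtx);
  fired_suppressions.push_back(s);
}

Relaxed ordering is enough for these counters because they only feed the statistics printed at exit; no other data is published through them, so no acquire/release pairing is required.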
@@ -375,8 +375,8 @@ static void PrintMatchedSuppressions() {
   Printf("Suppressions used:\n");
   Printf(" count bytes template\n");
   for (uptr i = 0; i < matched.size(); i++)
-    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
-           matched[i]->weight, matched[i]->templ);
+    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
+        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
   Printf("%s\n\n", line);
 }

@@ -598,7 +598,8 @@ void LeakReport::ApplySuppressions() {
     Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
     if (s) {
       s->weight += leaks_[i].total_size;
-      s->hit_count += leaks_[i].hit_count;
+      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
+                           leaks_[i].hit_count);
       leaks_[i].is_suppressed = true;
     }
   }
@@ -63,4 +63,20 @@ struct atomic_uintptr_t {
 # error "Unsupported compiler"
 #endif

+namespace __sanitizer {
+
+// Clutter-reducing helpers.
+
+template<typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+  return atomic_load(a, memory_order_relaxed);
+}
+
+template<typename T>
+INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+  atomic_store(a, v, memory_order_relaxed);
+}
+
+} // namespace __sanitizer
+
 #endif // SANITIZER_ATOMIC_H
@@ -127,13 +127,11 @@ void SuppressionContext::Parse(const char *str) {
         Printf("%s: failed to parse suppressions\n", SanitizerToolName);
         Die();
       }
-      Suppression s;
+      Suppression s = {};
       s.type = suppression_types_[type];
       s.templ = (char*)InternalAlloc(end2 - line + 1);
       internal_memcpy(s.templ, line, end2 - line);
       s.templ[end2 - line] = 0;
-      s.hit_count = 0;
-      s.weight = 0;
       suppressions_.push_back(s);
       has_suppression_type_[type] = true;
     }
@@ -163,7 +161,7 @@ const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
 void SuppressionContext::GetMatched(
     InternalMmapVector<Suppression *> *matched) {
   for (uptr i = 0; i < suppressions_.size(); i++)
-    if (suppressions_[i].hit_count)
+    if (atomic_load_relaxed(&suppressions_[i].hit_count))
       matched->push_back(&suppressions_[i]);
 }

@@ -14,6 +14,7 @@
 #define SANITIZER_SUPPRESSIONS_H

 #include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
 #include "sanitizer_internal_defs.h"

 namespace __sanitizer {
@@ -21,7 +22,7 @@ namespace __sanitizer {
 struct Suppression {
   const char *type;
   char *templ;
-  unsigned hit_count;
+  atomic_uint32_t hit_count;
   uptr weight;
 };

@@ -1840,7 +1840,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
     ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeErrnoInSignal);
-    if (!IsFiredSuppression(ctx, rep, stack)) {
+    if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
       rep.AddStack(stack, true);
       OutputReport(thr, rep);
     }
@@ -63,8 +63,8 @@ static const int kMaxDescLen = 128;
 struct ExpectRace {
   ExpectRace *next;
   ExpectRace *prev;
-  int hitcount;
-  int addcount;
+  atomic_uintptr_t hitcount;
+  atomic_uintptr_t addcount;
   uptr addr;
   uptr size;
   char *file;
@@ -90,7 +90,8 @@ static void AddExpectRace(ExpectRace *list,
   ExpectRace *race = list->next;
   for (; race != list; race = race->next) {
     if (race->addr == addr && race->size == size) {
-      race->addcount++;
+      atomic_store_relaxed(&race->addcount,
+                           atomic_load_relaxed(&race->addcount) + 1);
       return;
     }
   }
@@ -100,8 +101,8 @@ static void AddExpectRace(ExpectRace *list,
   race->file = f;
   race->line = l;
   race->desc[0] = 0;
-  race->hitcount = 0;
-  race->addcount = 1;
+  atomic_store_relaxed(&race->hitcount, 0);
+  atomic_store_relaxed(&race->addcount, 1);
   if (desc) {
     int i = 0;
     for (; i < kMaxDescLen - 1 && desc[i]; i++)
@@ -130,7 +131,7 @@ static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
     return false;
   DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
       race->desc, race->addr, (int)race->size, race->file, race->line);
-  race->hitcount++;
+  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
   return true;
 }

@@ -146,7 +147,7 @@ void InitializeDynamicAnnotations() {
 }

 bool IsExpectedReport(uptr addr, uptr size) {
-  Lock lock(&dyn_ann_ctx->mtx);
+  ReadLock lock(&dyn_ann_ctx->mtx);
   if (CheckContains(&dyn_ann_ctx->expect, addr, size))
     return true;
   if (CheckContains(&dyn_ann_ctx->benign, addr, size))
@@ -155,20 +156,21 @@ bool IsExpectedReport(uptr addr, uptr size) {
 }

 static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
-    int *unique_count, int *hit_count, int ExpectRace::*counter) {
+    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
   ExpectRace *list = &dyn_ann_ctx->benign;
   for (ExpectRace *race = list->next; race != list; race = race->next) {
     (*unique_count)++;
-    if (race->*counter == 0)
+    const uptr cnt = atomic_load_relaxed(&(race->*counter));
+    if (cnt == 0)
       continue;
-    (*hit_count) += race->*counter;
+    *hit_count += cnt;
     uptr i = 0;
     for (; i < matched->Size(); i++) {
       ExpectRace *race0 = &(*matched)[i];
       if (race->line == race0->line
           && internal_strcmp(race->file, race0->file) == 0
           && internal_strcmp(race->desc, race0->desc) == 0) {
-        race0->*counter += race->*counter;
+        atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
         break;
       }
     }
@@ -193,8 +195,8 @@ void PrintMatchedBenignRaces() {
         hit_count, (int)internal_getpid());
     for (uptr i = 0; i < hit_matched.Size(); i++) {
       Printf("%d %s:%d %s\n",
-          hit_matched[i].hitcount, hit_matched[i].file,
-          hit_matched[i].line, hit_matched[i].desc);
+          atomic_load_relaxed(&hit_matched[i].hitcount),
+          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
     }
   }
   if (hit_matched.Size()) {
@@ -203,8 +205,8 @@ void PrintMatchedBenignRaces() {
         add_count, unique_count, (int)internal_getpid());
     for (uptr i = 0; i < add_matched.Size(); i++) {
       Printf("%d %s:%d %s\n",
-          add_matched[i].addcount, add_matched[i].file,
-          add_matched[i].line, add_matched[i].desc);
+          atomic_load_relaxed(&add_matched[i].addcount),
+          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
     }
   }
 }
@@ -303,7 +305,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
   Lock lock(&dyn_ann_ctx->mtx);
   while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
     ExpectRace *race = dyn_ann_ctx->expect.next;
-    if (race->hitcount == 0) {
+    if (atomic_load_relaxed(&race->hitcount) == 0) {
       ctx->nmissed_expected++;
       ReportMissedExpectedRace(race);
     }
@@ -80,17 +80,17 @@ void AllocatorPrintStats() {
 }

 static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
-  if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
+  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
       !flags()->report_signal_unsafe)
     return;
   VarSizeStackTrace stack;
   ObtainCurrentStack(thr, pc, &stack);
+  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
+    return;
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeSignalUnsafe);
-  if (!IsFiredSuppression(ctx, rep, stack)) {
-    rep.AddStack(stack, true);
-    OutputReport(thr, rep);
-  }
+  rep.AddStack(stack, true);
+  OutputReport(thr, rep);
 }

 void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
@@ -41,6 +41,8 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
   /*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
   /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
   /*11 MutexTypeDDetector*/ {},
+  /*12 MutexTypeFired*/ {MutexTypeLeaf},
+  /*13 MutexTypeRacy*/ {MutexTypeLeaf},
 };

 static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
@@ -32,6 +32,8 @@ enum MutexType {
   MutexTypeMBlock,
   MutexTypeJavaMBlock,
   MutexTypeDDetector,
+  MutexTypeFired,
+  MutexTypeRacy,

   // This must be the last.
   MutexTypeCount
@@ -99,8 +99,10 @@ Context::Context()
   , nmissed_expected()
   , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
       CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
+  , racy_mtx(MutexTypeRacy, StatMtxRacy)
   , racy_stacks(MBlockRacyStacks)
   , racy_addresses(MBlockRacyAddresses)
+  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
   , fired_suppressions(8) {
 }

@@ -458,7 +458,7 @@ struct RacyAddress {

 struct FiredSuppression {
   ReportType type;
-  uptr pc;
+  uptr pc_or_addr;
   Suppression *supp;
 };

@@ -480,9 +480,11 @@ struct Context {

   ThreadRegistry *thread_registry;

+  Mutex racy_mtx;
   Vector<RacyStacks> racy_stacks;
   Vector<RacyAddress> racy_addresses;
   // Number of fired suppressions may be large enough.
+  Mutex fired_suppressions_mtx;
   InternalMmapVector<FiredSuppression> fired_suppressions;
   DDetector *dd;

@@ -587,8 +589,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc);

 void ReportRace(ThreadState *thr);
 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
-bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
-    StackTrace trace);
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
 void PrintMatchedBenignRaces();

@@ -369,27 +369,20 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
   // trace part, and then replaying the trace till the given epoch.
-  ctx->thread_registry->CheckLocked();
-  ThreadContext *tctx = static_cast<ThreadContext*>(
-      ctx->thread_registry->GetThreadLocked(tid));
-  if (tctx == 0)
-    return;
-  if (tctx->status != ThreadStatusRunning
-      && tctx->status != ThreadStatusFinished
-      && tctx->status != ThreadStatusDead)
-    return;
-  Trace* trace = ThreadTrace(tctx->tid);
-  Lock l(&trace->mtx);
+  Trace* trace = ThreadTrace(tid);
+  ReadLock l(&trace->mtx);
   const int partidx = (epoch / kTracePartSize) % TraceParts();
   TraceHeader* hdr = &trace->headers[partidx];
-  if (epoch < hdr->epoch0)
+  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
     return;
+  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
   const u64 epoch0 = RoundDown(epoch, TraceSize());
   const u64 eend = epoch % TraceSize();
   const u64 ebegin = RoundDown(eend, kTracePartSize);
   DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
       tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
-  InternalScopedBuffer<uptr> stack(kShadowStackSize);
+  Vector<uptr> stack(MBlockReportStack);
+  stack.Resize(hdr->stack0.size + 64);
   for (uptr i = 0; i < hdr->stack0.size; i++) {
     stack[i] = hdr->stack0.trace[i];
     DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
@@ -406,6 +399,8 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
     if (typ == EventTypeMop) {
       stack[pos] = pc;
     } else if (typ == EventTypeFuncEnter) {
+      if (stack.Size() < pos + 2)
+        stack.Resize(pos + 2);
       stack[pos++] = pc;
     } else if (typ == EventTypeFuncExit) {
       if (pos > 0)
@@ -428,50 +423,58 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
   if (pos == 0 && stack[0] == 0)
     return;
   pos++;
-  stk->Init(stack.data(), pos);
+  stk->Init(&stack[0], pos);
 }

 static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
     uptr addr_min, uptr addr_max) {
   bool equal_stack = false;
   RacyStacks hash;
-  if (flags()->suppress_equal_stacks) {
-    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
-    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
-      if (hash == ctx->racy_stacks[i]) {
-        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
-        equal_stack = true;
-        break;
-      }
-    }
-  }
   bool equal_address = false;
   RacyAddress ra0 = {addr_min, addr_max};
-  if (flags()->suppress_equal_addresses) {
-    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
-      RacyAddress ra2 = ctx->racy_addresses[i];
-      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
-      uptr minend = min(ra0.addr_max, ra2.addr_max);
-      if (maxbeg < minend) {
-        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
-        equal_address = true;
-        break;
+  {
+    ReadLock lock(&ctx->racy_mtx);
+    if (flags()->suppress_equal_stacks) {
+      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+        if (hash == ctx->racy_stacks[i]) {
+          VPrintf(2,
+              "ThreadSanitizer: suppressing report as doubled (stack)\n");
+          equal_stack = true;
+          break;
+        }
+      }
+    }
+    if (flags()->suppress_equal_addresses) {
+      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+        RacyAddress ra2 = ctx->racy_addresses[i];
+        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+        uptr minend = min(ra0.addr_max, ra2.addr_max);
+        if (maxbeg < minend) {
+          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+          equal_address = true;
+          break;
+        }
       }
     }
   }
-  if (equal_stack || equal_address) {
-    if (!equal_stack)
-      ctx->racy_stacks.PushBack(hash);
-    if (!equal_address)
-      ctx->racy_addresses.PushBack(ra0);
-    return true;
+  if (!equal_stack && !equal_address)
+    return false;
+  if (!equal_stack) {
+    Lock lock(&ctx->racy_mtx);
+    ctx->racy_stacks.PushBack(hash);
   }
-  return false;
+  if (!equal_address) {
+    Lock lock(&ctx->racy_mtx);
+    ctx->racy_addresses.PushBack(ra0);
+  }
+  return true;
 }

 static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
     uptr addr_min, uptr addr_max) {
+  Lock lock(&ctx->racy_mtx);
   if (flags()->suppress_equal_stacks) {
     RacyStacks hash;
     hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
@@ -487,26 +490,27 @@ static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
 bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
   if (!flags()->report_bugs)
     return false;
-  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
+  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
   const ReportDesc *rep = srep.GetReport();
   Suppression *supp = 0;
-  uptr suppress_pc = 0;
-  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
-  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
-  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
-  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
-  if (suppress_pc != 0) {
-    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
+  uptr pc_or_addr = 0;
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
+  if (pc_or_addr != 0) {
+    Lock lock(&ctx->fired_suppressions_mtx);
+    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
     ctx->fired_suppressions.push_back(s);
   }
   {
     bool old_is_freeing = thr->is_freeing;
     thr->is_freeing = false;
-    bool suppressed = OnReport(rep, suppress_pc != 0);
+    bool suppressed = OnReport(rep, pc_or_addr != 0);
     thr->is_freeing = old_is_freeing;
     if (suppressed)
       return false;
@@ -518,16 +522,16 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
   return true;
 }

-bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
-    StackTrace trace) {
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
+  ReadLock lock(&ctx->fired_suppressions_mtx);
   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
-    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+    if (ctx->fired_suppressions[k].type != type)
       continue;
     for (uptr j = 0; j < trace.size; j++) {
       FiredSuppression *s = &ctx->fired_suppressions[k];
-      if (trace.trace[j] == s->pc) {
+      if (trace.trace[j] == s->pc_or_addr) {
         if (s->supp)
-          s->supp->hit_count++;
+          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
         return true;
       }
     }
@@ -535,16 +539,15 @@ bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
   return false;
 }

-static bool IsFiredSuppression(Context *ctx,
-    const ScopedReport &srep,
-    uptr addr) {
+static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
+  ReadLock lock(&ctx->fired_suppressions_mtx);
   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
-    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+    if (ctx->fired_suppressions[k].type != type)
       continue;
     FiredSuppression *s = &ctx->fired_suppressions[k];
-    if (addr == s->pc) {
+    if (addr == s->pc_or_addr) {
       if (s->supp)
-        s->supp->hit_count++;
+        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
       return true;
     }
   }
@@ -597,8 +600,6 @@ void ReportRace(ThreadState *thr) {
     return;
   }

-  ThreadRegistryLock l0(ctx->thread_registry);
-
   ReportType typ = ReportTypeRace;
   if (thr->is_vptr_access && freed)
     typ = ReportTypeVptrUseAfterFree;
@@ -606,29 +607,35 @@ void ReportRace(ThreadState *thr) {
     typ = ReportTypeVptrRace;
   else if (freed)
     typ = ReportTypeUseAfterFree;
-  ScopedReport rep(typ);
-  if (IsFiredSuppression(ctx, rep, addr))
+
+  if (IsFiredSuppression(ctx, typ, addr))
     return;
+
   const uptr kMop = 2;
   VarSizeStackTrace traces[kMop];
   const uptr toppc = TraceTopPC(thr);
   ObtainCurrentStack(thr, toppc, &traces[0]);
-  if (IsFiredSuppression(ctx, rep, traces[0]))
+  if (IsFiredSuppression(ctx, typ, traces[0]))
     return;
-  InternalScopedBuffer<MutexSet> mset2(1);
-  new(mset2.data()) MutexSet();
+
+  // MutexSet is too large to live on stack.
+  Vector<u64> mset_buffer(MBlockScopedBuf);
+  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
+  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+
   Shadow s2(thr->racy_state[1]);
-  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
-  if (IsFiredSuppression(ctx, rep, traces[1]))
+  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
+  if (IsFiredSuppression(ctx, typ, traces[1]))
     return;

   if (HandleRacyStacks(thr, traces, addr_min, addr_max))
     return;

+  ThreadRegistryLock l0(ctx->thread_registry);
+  ScopedReport rep(typ);
   for (uptr i = 0; i < kMop; i++) {
     Shadow s(thr->racy_state[i]);
-    rep.AddMemoryAccess(addr, s, traces[i],
-        i == 0 ? &thr->mset : mset2.data());
+    rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
   }

   for (uptr i = 0; i < kMop; i++) {
@@ -164,8 +164,9 @@ void StatOutput(u64 *stat) {
   name[StatMtxAtExit] = " Atexit ";
   name[StatMtxAnnotations] = " Annotations ";
   name[StatMtxMBlock] = " MBlock ";
   name[StatMtxJavaMBlock] = " JavaMBlock ";
   name[StatMtxDeadlockDetector] = " DeadlockDetector ";
+  name[StatMtxFired] = " FiredSuppressions ";
+  name[StatMtxRacy] = " RacyStacks ";
   name[StatMtxFD] = " FD ";

   Printf("Statistics:\n");
@@ -169,8 +169,9 @@ enum StatType {
   StatMtxAnnotations,
   StatMtxAtExit,
   StatMtxMBlock,
   StatMtxJavaMBlock,
   StatMtxDeadlockDetector,
+  StatMtxFired,
+  StatMtxRacy,
   StatMtxFD,

   // This must be the last.
@@ -100,8 +100,8 @@ static uptr IsSuppressed(const char *stype, const AddressInfo &info,
   if (suppression_ctx->Match(info.function, stype, sp) ||
       suppression_ctx->Match(info.file, stype, sp) ||
       suppression_ctx->Match(info.module, stype, sp)) {
-    DPrintf("ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
-    (*sp)->hit_count++;
+    VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
+    atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed);
     return info.address;
   }
   return 0;
@@ -138,8 +138,8 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
     const DataInfo &global = loc->global;
     if (suppression_ctx->Match(global.name, stype, &s) ||
         suppression_ctx->Match(global.module, stype, &s)) {
-      DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
-      s->hit_count++;
+      VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ);
+      atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
       *sp = s;
       return global.start;
     }
@@ -154,7 +154,7 @@ void PrintMatchedSuppressions() {
     return;
   int hit_count = 0;
   for (uptr i = 0; i < matched.size(); i++)
-    hit_count += matched[i]->hit_count;
+    hit_count += atomic_load_relaxed(&matched[i]->hit_count);
   Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
          (int)internal_getpid());
   for (uptr i = 0; i < matched.size(); i++) {
@@ -0,0 +1,25 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s
+#include "test.h"
+
+const int kThreads = 16;
+const int kIters = 1000;
+
+volatile int X = 0;
+
+void *thr(void *arg) {
+  for (int i = 0; i < kIters; i++)
+    X++;
+  return 0;
+}
+
+int main() {
+  pthread_t th[kThreads];
+  for (int i = 0; i < kThreads; i++)
+    pthread_create(&th[i], 0, thr, 0);
+  for (int i = 0; i < kThreads; i++)
+    pthread_join(th[i], 0);
+  fprintf(stderr, "DONE\n");
+}
+
+// CHECK: ThreadSanitizer: data race
+// CHECK: DONE