tsan: add new trace

Add structures for the new trace format,
functions that serialize and add events to the trace
and trace replaying logic.

Differential Revision: https://reviews.llvm.org/D107911
This commit is contained in:
Dmitry Vyukov 2021-08-05 17:18:17 +02:00
parent f7347dfa03
commit c97318996f
8 changed files with 868 additions and 1 deletions

View File

@ -51,13 +51,18 @@ typedef __m128i m128;
namespace __tsan {
constexpr uptr kByteBits = 8;
// Thread slot ID.
enum class Sid : u8 {};
// Total number of thread slots (full range of the u8 Sid).
constexpr uptr kThreadSlotCount = 256;
// Sentinel Sid denoting a free/unused slot (the last representable value).
constexpr Sid kFreeSid = static_cast<Sid>(255);
// Abstract time unit, vector clock element.
enum class Epoch : u16 {};
constexpr uptr kEpochBits = 14;
constexpr Epoch kEpochZero = static_cast<Epoch>(0);
// First value past the representable epoch range (1 << kEpochBits),
// used as an out-of-band/sentinel epoch.
constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
const int kClkBits = 42;
// A u64 clock value holds kClkBits clock bits; the remaining
// (64 - kClkBits) bits hold the tid reuse count.
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;

View File

@ -555,6 +555,188 @@ StackID CurrentStackId(ThreadState *thr, uptr pc) {
return id;
}
namespace v3 {
// Writes a plain memory access event to the thread trace.
// Returns false if the current trace part is full
// (the caller must call TraceSwitchPart and retry).
ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
                                             uptr addr, uptr size,
                                             AccessType typ) {
  // Only power-of-2 sizes up to 8 bytes fit into the 2-bit size_log field.
  DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
  if (!kCollectHistory)
    return true;
  EventAccess *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  // Encode size as log2: 1->0, 2->1, 4->2, 8->3.
  u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
  // The PC is stored as a delta from the previous access PC, biased by
  // 2^(kPCBits-1) so that small negative deltas are representable too.
  uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
  thr->trace_prev_pc = pc;
  if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
    // Delta fits: emit the compact 8-byte EventAccess.
    ev->is_access = 1;
    ev->is_read = !!(typ & kAccessRead);
    ev->is_atomic = !!(typ & kAccessAtomic);
    ev->size_log = size_log;
    ev->pc_delta = pc_delta;
    DCHECK_EQ(ev->pc_delta, pc_delta);  // no truncation in the bitfield
    ev->addr = CompressAddr(addr);
    TraceRelease(thr, ev);
    return true;
  }
  // Delta does not fit: fall back to the 16-byte EventAccessExt
  // which stores the full PC.
  auto *evex = reinterpret_cast<EventAccessExt *>(ev);
  evex->is_access = 0;
  evex->is_func = 0;
  evex->type = EventType::kAccessExt;
  evex->is_read = !!(typ & kAccessRead);
  evex->is_atomic = !!(typ & kAccessAtomic);
  evex->size_log = size_log;
  evex->addr = CompressAddr(addr);
  evex->pc = pc;
  TraceRelease(thr, evex);
  return true;
}
// Writes a range memory access event (e.g. memset/memcpy-style access)
// to the thread trace. Returns false if the current trace part is full.
ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
                                                  uptr addr, uptr size,
                                                  AccessType typ) {
  if (!kCollectHistory)
    return true;
  EventAccessRange *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  thr->trace_prev_pc = pc;
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessRange;
  ev->is_read = !!(typ & kAccessRead);
  ev->is_free = !!(typ & kAccessFree);
  // The size is split into lo/hi parts to pack into the bitfield layout.
  ev->size_lo = size;
  ev->pc = CompressAddr(pc);
  ev->addr = CompressAddr(addr);
  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
  TraceRelease(thr, ev);
  return true;
}
// Traces a range access, switching to a fresh trace part if the
// current one is full; the retry after the switch must succeed.
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ) {
  if (UNLIKELY(!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))) {
    TraceSwitchPart(thr);
    UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
    DCHECK(res);
  }
}
// Traces a function entry/exit event, switching to a fresh trace part
// if the current one is full; the retry after the switch must succeed.
void TraceFunc(ThreadState *thr, uptr pc) {
  if (UNLIKELY(!TryTraceFunc(thr, pc))) {
    TraceSwitchPart(thr);
    UNUSED bool res = TryTraceFunc(thr, pc);
    DCHECK(res);
  }
}
// Writes a mutex lock event (write lock kLock or read lock kRLock)
// to the thread trace.
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk) {
  DCHECK(type == EventType::kLock || type == EventType::kRLock);
  if (!kCollectHistory)
    return;
  EventLock ev;
  ev.is_access = 0;
  ev.is_func = 0;
  ev.type = type;
  ev.pc = CompressAddr(pc);
  // The stack ID is split into lo/hi parts to pack into the bitfield layout.
  ev.stack_lo = stk;
  ev.stack_hi = stk >> EventLock::kStackIDLoBits;
  ev._ = 0;
  ev.addr = CompressAddr(addr);
  TraceEvent(thr, ev);
}
// Writes a mutex unlock event to the thread trace.
void TraceMutexUnlock(ThreadState *thr, uptr addr) {
  if (!kCollectHistory)
    return;
  EventUnlock e;
  e.is_access = 0;
  e.is_func = 0;
  e.type = EventType::kUnlock;
  e.addr = CompressAddr(addr);
  e._ = 0;
  TraceEvent(thr, e);
}
// Writes a time-change event carrying the thread's current
// slot ID and epoch to the trace.
void TraceTime(ThreadState *thr) {
  if (!kCollectHistory)
    return;
  EventTime e;
  e.is_access = 0;
  e.is_func = 0;
  e.type = EventType::kTime;
  e._ = 0;
  e.sid = static_cast<u64>(thr->sid);
  e.epoch = static_cast<u64>(thr->epoch);
  TraceEvent(thr, e);
}
// Switches the thread to a new trace part. Called when TraceAcquire fails;
// fills any remaining slots of the current part with NopEvent's, allocates
// and links a new part, and re-emits the current time, shadow stack and
// mutex set so the new part can be replayed on its own.
NOINLINE
void TraceSwitchPart(ThreadState *thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  // We get here only after TraceAcquire's alignment check fired for pos.
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
  if (part) {
    // We can get here when we still have space in the current trace part.
    // The fast-path check in TraceAcquire has false positives in the middle of
    // the part. Check if we are indeed at the end of the current part or not,
    // and fill any gaps with NopEvent's.
    Event *end = &part->events[TracePart::kSize];
    DCHECK_GE(pos, &part->events[0]);
    DCHECK_LE(pos, end);
    if (pos + 1 < end) {
      // False positive: pad up to the next aligned boundary and continue
      // in the same part.
      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
          TracePart::kAlignment)
        *pos++ = NopEvent;
      *pos++ = NopEvent;
      DCHECK_LE(pos + 2, end);
      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
      // Ensure we setup trace so that the next TraceAcquire
      // won't detect trace part end.
      Event *ev;
      CHECK(TraceAcquire(thr, &ev));
      return;
    }
    // We are indeed at the end.
    for (; pos < end; pos++) *pos = NopEvent;
  }
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    CHECK(part);
    // Overwrite the current part from the beginning instead of allocating.
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
    return;
  }
#endif
  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
  part->trace = trace;
  // Reset the PC-delta base so the first access in the new part
  // emits a full PC.
  thr->trace_prev_pc = 0;
  {
    Lock lock(&trace->mtx);
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
    CHECK(TryTraceFunc(thr, *pos));
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                   d.addr, d.stack_id);
  }
}
} // namespace v3
void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
if (ctx->after_multithreaded_fork)

View File

@ -444,6 +444,13 @@ struct ThreadState {
const ReportDesc *current_report;
// Current position in tctx->trace.Back()->events (Event*).
atomic_uintptr_t trace_pos;
// PC of the last memory access, used to compute PC deltas in the trace.
uptr trace_prev_pc;
Sid sid;
Epoch epoch;
explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
unsigned reuse_count, uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
@ -486,6 +493,8 @@ class ThreadContext final : public ThreadContextBase {
u64 epoch0;
u64 epoch1;
v3::Trace trace;
// Override superclass callbacks.
void OnDead() override;
void OnJoined(void *arg) override;
@ -549,6 +558,8 @@ struct Context {
ClockAlloc clock_alloc;
Flags flags;
Mutex slot_mtx;
};
extern Context *ctx; // The one and the only global runtime context.
@ -892,6 +903,88 @@ void LazyInitialize(ThreadState *thr) {
#endif
}
namespace v3 {
void TraceSwitchPart(ThreadState *thr);
bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
uptr size, AccessType typ, VarSizeStackTrace *pstk,
MutexSet *pmset, uptr *ptag);
// Reserves space for one event of type EventT in the thread trace.
// On success stores the slot pointer in *ev and returns true; the caller
// fills the event and then passes the same pointer to TraceRelease.
// Returns false when the current trace part may be full (the caller must
// call TraceSwitchPart and retry).
template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  // Sanity-check that the position lies within the current part (if any).
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace,
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}
// Publishes an event previously reserved with TraceAcquire by advancing
// the trace position past it.
template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
}
// Appends a fully constructed event to the trace, switching to a new
// trace part if the current one is full.
template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *slot;
  if (UNLIKELY(!TraceAcquire(thr, &slot))) {
    TraceSwitchPart(thr);
    UNUSED bool ok = TraceAcquire(thr, &slot);
    DCHECK(ok);
  }
  *slot = ev;
  TraceRelease(thr, slot);
}
// Writes a function entry (pc != 0) or exit (pc == 0) event to the trace.
// Returns false when the current trace part has no space left.
ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  if (!kCollectHistory)
    return true;
  EventFunc *slot;
  if (UNLIKELY(!TraceAcquire(thr, &slot)))
    return false;
  slot->is_func = 1;
  slot->is_access = 0;
  slot->pc = pc;
  TraceRelease(thr, slot);
  return true;
}
WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);
} // namespace v3
} // namespace __tsan
#endif // TSAN_RTL_H

View File

@ -450,6 +450,225 @@ void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
ExtractTagFromStack(stk, tag);
}
namespace v3 {
// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
// f is called as f(Sid, Epoch, Event*) with the sid/epoch that were
// current (from the last kTime event) when the event was traced.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  // Sentinels: replay must see a kTime event before any other event.
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            // Update the current sid/epoch; stop once we pass the target.
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}
// Records the current replay state as the match result: snapshots the
// mutex set and the stack with the access pc appended on top.
static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2(" MATCHED\n");
  *found = true;
  // Temporarily push the access pc so it is captured in the stack trace,
  // then restore the replay stack to its previous state.
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *pmset = *mset;
}
// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr2 <= addr1 && addr1 + size1 <= addr2 + size2;
}
// Replays the trace of thread tid up to the target event identified
// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
                  uptr size, AccessType typ, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid, sid,
           epoch, addr, size, typ);
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part)
      return false;
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    // For a live thread the end position is its current trace position.
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  // Too large for stack.
  alignas(MutexSet) static char mset_storage[sizeof(MutexSet)];
  MutexSet &mset = *new (mset_storage) MutexSet();
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          // Undo the biased PC-delta encoding (see TryTraceMemoryAccess).
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2(" Access: pc=0x%zx addr=0x%llx/%llu type=%llu/%llu\n",
                   ev_pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          // Function entry (pc != 0) pushes, function exit (pc == 0) pops.
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%zx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            CHECK(stack.Size());
            stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            // Full-PC event also resets the PC-delta base.
            prev_pc = ev->pc;
            DPrintf2(" AccessExt: pc=0x%zx addr=0x%llx/%llu type=%llu/%llu\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, &mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            // Recombine the lo/hi parts of the range size.
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2(" Range: pc=0x%zx addr=0x%llx/%llu type=%llu/%llu\n",
                     ev_pc, ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            // Recombine the lo/hi parts of the stack ID.
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2(" Lock: pc=0x%zx addr=0x%llx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset.AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of trace
            // part as initial mutex set (are not real).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2(" Unlock: addr=0x%llx\n", ev_addr);
            mset.DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}
} // namespace v3
static bool FindRacyStacks(const RacyStacks &hash) {
for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
if (hash == ctx->racy_stacks[i]) {

View File

@ -252,6 +252,8 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
while (!thr->tctx->trace.parts.Empty()) thr->tctx->trace.parts.PopBack();
#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;

View File

@ -13,8 +13,9 @@
#define TSAN_TRACE_H
#include "tsan_defs.h"
#include "tsan_stack_trace.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"
namespace __tsan {
@ -67,6 +68,155 @@ struct Trace {
Trace() : mtx(MutexTypeTrace) {}
};
namespace v3 {
// Type tag for events that are neither plain accesses nor func entry/exit
// (stored in Event::type when is_access == 0 and is_func == 0).
enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};
// "Base" type for all events for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to some event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise type denotes the type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;  // payload bits; meaning depends on the actual event type
};
static_assert(sizeof(Event) == 8, "bad Event size");
// Nop event used as padding and does not affect state during replay.
// Encoded with is_access = 1 and all payload bits zero.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
// Compressed memory access can represent only some events with PCs
// close enough to each other. Otherwise we fall back to EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;        // log2 of the access size (1/2/4/8 bytes)
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;  // compressed address (see CompressAddr)
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;        // function PC; 0 denotes function exit
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
// Extended memory access with full PC.
struct EventAccessExt {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;    // log2 of the access size (1/2/4/8 bytes)
  u64 _ : 11;          // unused padding
  u64 addr : kCompressedAddrBits;
  u64 pc;              // full (uncompressed) PC
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;  // low bits of the range size
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;  // high bits of the range size
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;  // low bits of the lock stack ID
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;  // high bits
  u64 _ : 3;           // unused padding
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
// Mutex unlock.
struct EventUnlock {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : 15;          // unused padding
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");
// Time change event.
struct EventTime {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;  // thread slot ID
  u64 epoch : kEpochBits;             // epoch of the slot
  u64 _ : 64 - 5 - sizeof(Sid) * kByteBits - kEpochBits;  // unused padding
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");
struct Trace;
// Header of a trace part; the event array follows it in TracePart.
struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
};
// A fixed-size chunk of a thread trace; parts are linked in Trace::parts.
struct TracePart : TraceHeader {
  // Total allocation size of a part, including the header.
  static constexpr uptr kByteSize = 256 << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing
  // pointer into TracePart::events with kAlignment mask. Since TracePart's
  // are allocated page-aligned, this check detects end of the array
  // (it also has false positives in the middle that are filtered separately).
  // This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];
  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
// Per-thread trace: a list of TraceParts protected by mtx.
struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  Event* final_pos =
      nullptr;  // final position in the last part for finished threads
  Trace() : mtx(MutexTypeTrace) {}
};
} // namespace v3
} // namespace __tsan
#endif // TSAN_TRACE_H

View File

@ -7,6 +7,7 @@ set(TSAN_UNIT_TEST_SOURCES
tsan_shadow_test.cpp
tsan_stack_test.cpp
tsan_sync_test.cpp
tsan_trace_test.cpp
tsan_unit_test_main.cpp
tsan_vector_clock_test.cpp
)

View File

@ -0,0 +1,215 @@
//===-- tsan_trace_test.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_trace.h"
#include <pthread.h>
#include "gtest/gtest.h"
#include "tsan_rtl.h"
namespace __tsan {
using namespace v3;
// We need to run all trace tests in a new thread,
// so that the thread trace is empty initially.
static void run_in_thread(void *(*f)(void *), void *arg = nullptr) {
pthread_t th;
pthread_create(&th, nullptr, f, arg);
pthread_join(th, nullptr);
}
TEST(Trace, RestoreAccess) {
  struct Thread {
    static void *Func(void *arg) {
      // A basic test with some function entry/exit events,
      // some mutex lock/unlock events and some other distracting
      // memory events.
      ThreadState *thr = cur_thread();
      TraceFunc(thr, 0x1000);
      TraceFunc(thr, 0x1001);
      TraceMutexLock(thr, v3::EventType::kLock, 0x4000, 0x5000, 0x6000);
      TraceMutexLock(thr, v3::EventType::kLock, 0x4001, 0x5001, 0x6001);
      TraceMutexUnlock(thr, 0x5000);
      TraceFunc(thr);  // exit from the 0x1001 frame
      CHECK(TryTraceMemoryAccess(thr, 0x2001, 0x3001, 8, kAccessRead));
      TraceMutexLock(thr, v3::EventType::kRLock, 0x4002, 0x5002, 0x6002);
      TraceFunc(thr, 0x1002);
      CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, 8, kAccessRead));
      // This is the access we want to find.
      // The previous one is equivalent, but RestoreStack must prefer
      // the last of the matching accesses.
      CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
      // RestoreStack requires both of these locks to be held.
      Lock lock1(&ctx->slot_mtx);
      ThreadRegistryLock lock2(&ctx->thread_registry);
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res =
          RestoreStack(thr->tid, v3::EventType::kAccessExt, thr->sid,
                       thr->epoch, 0x3000, 8, kAccessRead, &stk, &mset, &tag);
      CHECK(res);
      CHECK_EQ(stk.size, 3);
      CHECK_EQ(stk.trace[0], 0x1000);
      CHECK_EQ(stk.trace[1], 0x1002);
      CHECK_EQ(stk.trace[2], 0x2002);
      CHECK_EQ(mset.Size(), 2);
      CHECK_EQ(mset.Get(0).addr, 0x5001);
      CHECK_EQ(mset.Get(0).stack_id, 0x6001);
      CHECK_EQ(mset.Get(0).write, true);
      CHECK_EQ(mset.Get(1).addr, 0x5002);
      CHECK_EQ(mset.Get(1).stack_id, 0x6002);
      CHECK_EQ(mset.Get(1).write, false);
      CHECK_EQ(tag, kExternalTagNone);
      return nullptr;
    }
  };
  run_in_thread(Thread::Func);
}
TEST(Trace, MemoryAccessSize) {
  struct Thread {
    struct Params {
      uptr access_size, offset, size;
      bool res;   // whether RestoreStack is expected to find the access
      int type;   // 0: compact event, 1: full-PC event, 2: range event
    };
    static void *Func(void *arg) {
      // Test tracing and matching of accesses of different sizes.
      const Params *params = static_cast<Params *>(arg);
      Printf("access_size=%zu, offset=%zu, size=%zu, res=%d, type=%d\n",
             params->access_size, params->offset, params->size, params->res,
             params->type);
      ThreadState *thr = cur_thread();
      TraceFunc(thr, 0x1000);
      switch (params->type) {
        case 0:
          // This should emit compressed event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, params->access_size,
                                     kAccessRead));
          break;
        case 1:
          // This should emit full event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000000, 0x3000,
                                     params->access_size, kAccessRead));
          break;
        case 2:
          TraceMemoryAccessRange(thr, 0x2000000, 0x3000, params->access_size,
                                 kAccessRead);
          break;
      }
      Lock lock1(&ctx->slot_mtx);
      ThreadRegistryLock lock2(&ctx->thread_registry);
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res = RestoreStack(thr->tid, v3::EventType::kAccessExt, thr->sid,
                              thr->epoch, 0x3000 + params->offset, params->size,
                              kAccessRead, &stk, &mset, &tag);
      CHECK_EQ(res, params->res);
      if (params->res) {
        CHECK_EQ(stk.size, 2);
        CHECK_EQ(stk.trace[0], 0x1000);
        CHECK_EQ(stk.trace[1], params->type ? 0x2000000 : 0x2000);
      }
      return nullptr;
    }
  };
  // {access_size, offset, size, expected-found}; type is filled in below.
  Thread::Params tests[] = {
      {1, 0, 1, true},  {4, 0, 2, true},
      {4, 2, 2, true},  {8, 3, 1, true},
      {2, 1, 1, true},  {1, 1, 1, false},
      {8, 5, 4, false}, {4, static_cast<uptr>(-1l), 4, false},
  };
  for (auto params : tests) {
    for (params.type = 0; params.type < 3; params.type++)
      run_in_thread(Thread::Func, &params);
  }
}
TEST(Trace, RestoreMutexLock) {
  struct Thread {
    static void *Func(void *arg) {
      // Check of restoration of a mutex lock event.
      ThreadState *thr = cur_thread();
      TraceFunc(thr, 0x1000);
      TraceMutexLock(thr, v3::EventType::kLock, 0x4000, 0x5000, 0x6000);
      TraceMutexLock(thr, v3::EventType::kRLock, 0x4001, 0x5001, 0x6001);
      TraceMutexLock(thr, v3::EventType::kRLock, 0x4002, 0x5001, 0x6002);
      Lock lock1(&ctx->slot_mtx);
      ThreadRegistryLock lock2(&ctx->thread_registry);
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res = RestoreStack(thr->tid, v3::EventType::kLock, thr->sid,
                              thr->epoch, 0x5001, 0, 0, &stk, &mset, &tag);
      // Verify the restore succeeded (res was previously computed but never
      // checked, matching neither the RestoreAccess test nor the intent;
      // a failed restore would leave stk/mset unset for the checks below).
      CHECK(res);
      CHECK_EQ(stk.size, 2);
      CHECK_EQ(stk.trace[0], 0x1000);
      CHECK_EQ(stk.trace[1], 0x4002);
      CHECK_EQ(mset.Size(), 2);
      CHECK_EQ(mset.Get(0).addr, 0x5000);
      CHECK_EQ(mset.Get(0).stack_id, 0x6000);
      CHECK_EQ(mset.Get(0).write, true);
      CHECK_EQ(mset.Get(1).addr, 0x5001);
      CHECK_EQ(mset.Get(1).stack_id, 0x6001);
      CHECK_EQ(mset.Get(1).write, false);
      return nullptr;
    }
  };
  run_in_thread(Thread::Func);
}
TEST(Trace, MultiPart) {
  struct Thread {
    static void *Func(void *arg) {
      // Check replay of a trace with multiple parts.
      ThreadState *thr = cur_thread();
      TraceFunc(thr, 0x1000);
      TraceFunc(thr, 0x2000);
      TraceMutexLock(thr, v3::EventType::kLock, 0x4000, 0x5000, 0x6000);
      // Emit enough events to overflow into several trace parts.
      const uptr kEvents = 3 * sizeof(TracePart) / sizeof(v3::Event);
      for (uptr i = 0; i < kEvents; i++) {
        TraceFunc(thr, 0x3000);
        TraceMutexLock(thr, v3::EventType::kLock, 0x4002, 0x5002, 0x6002);
        TraceMutexUnlock(thr, 0x5002);
        TraceFunc(thr);
      }
      TraceFunc(thr, 0x4000);
      TraceMutexLock(thr, v3::EventType::kRLock, 0x4001, 0x5001, 0x6001);
      CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
      Lock lock1(&ctx->slot_mtx);
      ThreadRegistryLock lock2(&ctx->thread_registry);
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res =
          RestoreStack(thr->tid, v3::EventType::kAccessExt, thr->sid,
                       thr->epoch, 0x3000, 8, kAccessRead, &stk, &mset, &tag);
      // Verify the restore succeeded (res was previously computed but never
      // checked; a failed restore would leave stk/mset unset for the checks
      // below and trigger an unused-variable warning).
      CHECK(res);
      CHECK_EQ(stk.size, 4);
      CHECK_EQ(stk.trace[0], 0x1000);
      CHECK_EQ(stk.trace[1], 0x2000);
      CHECK_EQ(stk.trace[2], 0x4000);
      CHECK_EQ(stk.trace[3], 0x2002);
      CHECK_EQ(mset.Size(), 2);
      CHECK_EQ(mset.Get(0).addr, 0x5000);
      CHECK_EQ(mset.Get(0).stack_id, 0x6000);
      CHECK_EQ(mset.Get(0).write, true);
      CHECK_EQ(mset.Get(1).addr, 0x5001);
      CHECK_EQ(mset.Get(1).stack_id, 0x6001);
      CHECK_EQ(mset.Get(1).write, false);
      return nullptr;
    }
  };
  run_in_thread(Thread::Func);
}
} // namespace __tsan