tsan: move traces from tls into dedicated storage at fixed address
helps to reduce tls size (it's weird to have multi-MB tls)
will help with dynamically adjustable trace size

llvm-svn: 168783

commit 2429b02770 (parent a873623e54)
@@ -14,8 +14,8 @@
 #include <algorithm>
 #include <vector>
 
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x10000000000; // 1T.
+static const uptr kAllocatorSpace = 0x700000000000ULL;
+static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
 
 typedef DefaultSizeClassMap SCMap;
 typedef
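The allocator constants above presumably move because the old base, 0x600000000000, is exactly where the new trace region lands (see the layout changes below); the new base 0x700000000000 keeps the two regions disjoint. A minimal standalone overlap check, using only range values taken from this diff:

// Sketch: the old allocator base collides with the new trace region,
// the new one does not. All constants come from this diff.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kTraceBegin = 0x600000000000ULL;               // kTraceMemBegin
  const uint64_t kTraceEnd   = kTraceBegin + 0x020000000000ULL; // + kTraceMemSize
  const uint64_t kAllocSize  = 0x010000000000ULL;               // 1T
  const uint64_t kOldAlloc   = 0x600000000000ULL;
  const uint64_t kNewAlloc   = 0x700000000000ULL;
  // Half-open interval overlap test.
  printf("old overlaps traces: %d\n",
         kOldAlloc < kTraceEnd && kOldAlloc + kAllocSize > kTraceBegin);
  printf("new overlaps traces: %d\n",
         kNewAlloc < kTraceEnd && kNewAlloc + kAllocSize > kTraceBegin);
  return 0;
}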
@@ -36,7 +36,7 @@ char buf[10];
 
 int main(void) {
   __tsan_init();
-  __tsan_map_shadow(buf, sizeof(buf));
+  __tsan_map_shadow(buf, sizeof(buf) + 4096);
   __tsan_func_enter(0, &main);
   __tsan_malloc(0, buf, 10, 0);
   __tsan_release(0, buf);
@@ -16,7 +16,9 @@
 C++ linux memory layout:
 0000 0000 0000 - 03c0 0000 0000: protected
 03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 7d00 0000 0000: protected
+1000 0000 0000 - 6000 0000 0000: protected
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
 7d00 0000 0000 - 7e00 0000 0000: heap
 7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
 
@@ -25,7 +27,9 @@ C++ COMPAT linux memory layout:
 0400 0000 0000 - 1000 0000 0000: shadow
 1000 0000 0000 - 2900 0000 0000: protected
 2900 0000 0000 - 2c00 0000 0000: modules
-2c00 0000 0000 - 7d00 0000 0000: -
+2c00 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
 7d00 0000 0000 - 7e00 0000 0000: heap
 7e00 0000 0000 - 7f00 0000 0000: -
 7f00 0000 0000 - 7fff ffff ffff: main thread stack
@@ -36,7 +40,9 @@ Go linux and darwin memory layout:
 00f8 0000 0000 - 0118 0000 0000: heap
 0118 0000 0000 - 1000 0000 0000: -
 1000 0000 0000 - 1460 0000 0000: shadow
-1460 0000 0000 - 7fff ffff ffff: -
+1460 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7fff ffff ffff: -
 
 Go windows memory layout:
 0000 0000 0000 - 0000 1000 0000: executable
@@ -44,13 +50,15 @@ Go windows memory layout:
 00f8 0000 0000 - 0118 0000 0000: heap
 0118 0000 0000 - 0100 0000 0000: -
 0100 0000 0000 - 0560 0000 0000: shadow
-0560 0000 0000 - 07ff ffff ffff: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07ff ffff ffff: -
 */
 
 #ifndef TSAN_PLATFORM_H
 #define TSAN_PLATFORM_H
 
-#include "tsan_rtl.h"
+#include "tsan_defs.h"
+#include "tsan_trace.h"
 
 #if defined(__LP64__) || defined(_WIN64)
 namespace __tsan {
@@ -76,6 +84,13 @@ static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
 
 static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
 
+#if defined(_WIN32)
+const uptr kTraceMemBegin = 0x056000000000ULL;
+#else
+const uptr kTraceMemBegin = 0x600000000000ULL;
+#endif
+const uptr kTraceMemSize = 0x020000000000ULL;
+
 // This has to be a macro to allow constant initialization of constants below.
 #ifndef TSAN_GO
 #define MemToShadow(addr) \
@@ -122,6 +137,12 @@ void FlushShadowMemory();
 
 const char *InitializePlatform();
 void FinalizePlatform();
+void MapThreadTrace(uptr addr, uptr size);
+uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
+  uptr p = kTraceMemBegin + tid * kTraceSize * sizeof(Event);
+  DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+  return p;
+}
 
 void internal_start_thread(void(*func)(void*), void *arg);
 
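GetThreadTrace() is the heart of the change: a thread's trace is no longer a TLS member but a fixed-size slot inside [kTraceMemBegin, kTraceMemBegin + kTraceMemSize), addressed purely by tid. A standalone sketch of the arithmetic; kTraceSize is defined in tsan_trace.h and does not appear in this diff, so the value below is an illustrative assumption, not the real constant:

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;
typedef uint64_t Event;

const uptr kTraceMemBegin = 0x600000000000ULL;  // non-Windows base from the diff
const uptr kTraceMemSize  = 0x020000000000ULL;  // 2 TiB of address space
const uptr kTraceSize     = 128 * 1024;         // assumed events per trace

uptr GetThreadTrace(int tid) {
  uptr p = kTraceMemBegin + tid * kTraceSize * sizeof(Event);
  assert(p < kTraceMemBegin + kTraceMemSize);   // stand-in for DCHECK_LT
  return p;
}

int main() {
  // tid -> address is O(1) and needs no per-thread state; the region
  // bounds the number of threads by kTraceMemSize / slot.
  uptr slot = kTraceSize * sizeof(Event);       // 1 MiB under the assumption
  printf("slot=%zu bytes, max threads=%zu\n",
         (size_t)slot, (size_t)(kTraceMemSize / slot));
  printf("tid 5 trace at 0x%zx\n", (size_t)GetThreadTrace(5));
  return 0;
}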
@@ -103,7 +103,7 @@ void InitializeShadowMemory() {
   const uptr kClosedLowBeg = 0x200000;
   const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
   const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
-  const uptr kClosedMidEnd = kLinuxAppMemBeg - 1;
+  const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
   ProtectRange(kClosedLowBeg, kClosedLowEnd);
   ProtectRange(kClosedMidBeg, kClosedMidEnd);
   DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
@@ -120,6 +120,16 @@ void InitializeShadowMemory() {
 }
 #endif
 
+void MapThreadTrace(uptr addr, uptr size) {
+  DPrintf("Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+  CHECK_GE(addr, kTraceMemBegin);
+  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
+    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
+    Die();
+  }
+}
+
 static uptr g_data_start;
 static uptr g_data_end;
 
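MapThreadTrace() leans on MmapFixedNoReserve() from sanitizer_common. On Linux that boils down to an anonymous MAP_FIXED | MAP_NORESERVE mapping: the pages appear at exactly the requested address, but no swap is committed, so carving slots out of a 2 TiB trace window costs nothing until a page is touched. A rough Linux-only sketch of the underlying call, not the sanitizer API itself:

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

// Sketch of the syscall behind MmapFixedNoReserve(). Note that MAP_FIXED
// silently replaces any existing mapping in the range; the runtime is
// expected to own this part of the address space.
void *MapFixedNoReserve(uint64_t addr, uint64_t size) {
  void *p = mmap((void *)addr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                 -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

int main() {
  const uint64_t kAddr = 0x600000000000ULL;  // kTraceMemBegin from the diff
  const uint64_t kSize = 1 << 20;            // map only 1 MiB for the demo
  void *p = MapFixedNoReserve(kAddr, kSize);
  if (p != (void *)kAddr) {                  // mirrors the FATAL check above
    fprintf(stderr, "can not mmap thread trace\n");
    return 1;
  }
  ((volatile char *)p)[0] = 1;               // first touch commits one page
  munmap(p, kSize);
  return 0;
}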
@@ -288,6 +288,13 @@ void TraceSwitch(ThreadState *thr) {
   thr->nomalloc--;
 }
 
+uptr TraceTopPC(ThreadState *thr) {
+  Event *events = (Event*)GetThreadTrace(thr->tid);
+  uptr pc = events[thr->fast_state.epoch() % kTraceSize]
+      & ((1ull << 61) - 1);
+  return pc;
+}
+
 #ifndef TSAN_GO
 extern "C" void __tsan_trace_switch() {
   TraceSwitch(cur_thread());
@@ -453,7 +460,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
 
   // We must not store to the trace if we do not store to the shadow.
   // That is, this call must be moved somewhere below.
-  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+  TraceAddEvent(thr, fast_state, EventTypeMop, pc);
 
   MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
       shadow_mem, cur);
@@ -523,7 +530,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
   StatInc(thr, StatFuncEnter);
   DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);
+  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
 
   // Shadow stack maintenance can be replaced with
   // stack unwinding during trace switch (which presumably must be faster).
@@ -553,7 +560,7 @@ void FuncExit(ThreadState *thr) {
   StatInc(thr, StatFuncExit);
   DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);
+  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
 
   DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
 #ifndef TSAN_GO
@@ -35,6 +35,7 @@
 #include "tsan_trace.h"
 #include "tsan_vector.h"
 #include "tsan_report.h"
+#include "tsan_platform.h"
 
 namespace __tsan {
 
@@ -533,11 +534,13 @@ void AfterSleep(ThreadState *thr, uptr pc);
 #endif
 
 void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
 
 extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
+void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                         EventType typ, uptr addr) {
   StatInc(thr, StatEvents);
+  u64 epoch = fs.epoch();
   if (UNLIKELY((epoch % kTracePartSize) == 0)) {
 #ifndef TSAN_GO
     HACKY_CALL(__tsan_trace_switch);
@@ -545,7 +548,8 @@ void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
     TraceSwitch(thr);
 #endif
   }
-  Event *evp = &thr->trace.events[epoch % kTraceSize];
+  Event *trace = (Event*)GetThreadTrace(fs.tid());
+  Event *evp = &trace[epoch % kTraceSize];
   Event ev = (u64)addr | ((u64)typ << 61);
   *evp = ev;
 }
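TraceAddEvent() packs each event into a single u64: the EventType in the top 3 bits, the address/pc in the low 61. TraceTopPC() and RestoreStack() undo this with ev >> 61 and ev & ((1ull << 61) - 1). A minimal round-trip check of that packing; the enumerator order below is assumed for the demo, the real enum lives in tsan_trace.h:

#include <cassert>
#include <cstdint>

typedef uint64_t u64;
typedef u64 Event;

// Assumed order, for illustration only.
enum EventType { EventTypeMop, EventTypeFuncEnter, EventTypeFuncExit,
                 EventTypeLock, EventTypeUnlock, EventTypeRLock,
                 EventTypeRUnlock };

int main() {
  u64 addr = 0x7f1234567890ULL;              // any pc below 2^61
  EventType typ = EventTypeLock;
  Event ev = (u64)addr | ((u64)typ << 61);   // pack, as in TraceAddEvent()
  assert((EventType)(ev >> 61) == typ);      // unpack type, as in RestoreStack()
  assert((ev & ((1ull << 61) - 1)) == addr); // unpack pc, as in TraceTopPC()
  return 0;
}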
@@ -75,7 +75,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
   if (s->owner_tid == SyncVar::kInvalidTid) {
     CHECK_EQ(s->recursion, 0);
@@ -107,7 +107,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
   if (s->recursion == 0) {
     if (!s->is_broken) {
@@ -144,7 +144,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
@@ -164,7 +164,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read unlock of a write "
@@ -188,7 +188,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     // Seems to be read unlock.
     StatInc(thr, StatMutexReadUnlock);
     thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.release(&s->read_clock);
@@ -205,7 +205,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     // First, it's a bug to increment the epoch w/o writing to the trace.
     // Then, the acquire/release logic can be factored out as well.
     thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.ReleaseStore(&s->clock);
@@ -277,8 +277,9 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
     DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
   }
   uptr pos = hdr->stack0.Size();
+  Event *events = (Event*)GetThreadTrace(tid);
   for (uptr i = ebegin; i <= eend; i++) {
-    Event ev = trace->events[i];
+    Event ev = events[i];
     EventType typ = (EventType)(ev >> 61);
     uptr pc = (uptr)(ev & 0xffffffffffffull);
     DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
@@ -416,8 +417,7 @@ void ReportRace(ThreadState *thr) {
   ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
   const uptr kMop = 2;
   StackTrace traces[kMop];
-  const uptr toppc = thr->trace.events[thr->fast_state.epoch() % kTraceSize]
-      & ((1ull << 61) - 1);
+  const uptr toppc = TraceTopPC(thr);
   traces[0].ObtainCurrent(thr, toppc);
   if (IsFiredSuppression(ctx, rep, traces[0]))
     return;
@@ -123,6 +123,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
     void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
     tctx = new(mem) ThreadContext(tid);
     ctx->threads[tid] = tctx;
+    MapThreadTrace(GetThreadTrace(tid), kTraceSize * sizeof(Event));
   }
   CHECK_NE(tctx, 0);
   CHECK_GE(tid, 0);
@@ -143,7 +144,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
   if (tid) {
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.release(&tctx->sync);
@@ -238,7 +239,7 @@ void ThreadFinish(ThreadState *thr) {
   } else {
     thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.release(&tctx->sync);
@@ -249,9 +250,8 @@ void ThreadFinish(ThreadState *thr) {
   // Save from info about the thread.
   tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
       ThreadDeadInfo();
-  internal_memcpy(&tctx->dead_info->trace.events[0],
-      &thr->trace.events[0], sizeof(thr->trace.events));
   for (int i = 0; i < kTraceParts; i++) {
+    tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
     tctx->dead_info->trace.headers[i].stack0.CopyFrom(
         thr->trace.headers[i].stack0);
   }
@@ -358,7 +358,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
 
   fast_state.IncrementEpoch();
   thr->fast_state = fast_state;
-  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+  TraceAddEvent(thr, fast_state, EventTypeMop, pc);
 
   bool unaligned = (addr % kShadowCell) != 0;
 
@@ -61,7 +61,6 @@ struct TraceHeader {
 };
 
 struct Trace {
-  Event events[kTraceSize];
   TraceHeader headers[kTraceParts];
   Mutex mtx;
 
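Dropping Event events[kTraceSize] from Trace is what shrinks the TLS: the array alone was kTraceSize * sizeof(Event) bytes per thread, the "multi-MB tls" from the commit message. Under the same illustrative kTraceSize as in the earlier sketch:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kTraceSize = 128 * 1024;  // assumed, as in the earlier sketch
  const uint64_t kEventSize = 8;           // sizeof(Event) == sizeof(u64)
  // This many bytes per thread move from TLS into the mmapped trace region.
  printf("events[] removed from TLS: %llu KiB per thread\n",
         (unsigned long long)(kTraceSize * kEventSize / 1024));
  return 0;
}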
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 #include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_platform.h"
 #include "tsan_rtl.h"
 #include "gtest/gtest.h"
 
 namespace __tsan {