tsan: revert r262037

Broke aarch64 and darwin bots.

llvm-svn: 262046

parent 1762ad3e73 · commit 7f022ae4c2
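
This reverts the Processor change (r262037): the per-thread caches that r262037 had moved into a separate Processor object go back into ThreadState, the ProcCreate/ProcDestroy/ProcWire/ProcUnwire plumbing is deleted, and the MetaMap/SyncVar entry points take a (ThreadState*, pc) pair again instead of a Processor*. A condensed sketch of the two layouts, with members taken from the diff below (simplified, not the full structs):

    // Layout being reverted (r262037): caches lived on a Processor that had
    // to be wired to a ThreadState before the thread could handle events.
    struct Processor {
      ThreadState *thr;  // currently wired thread, or nullptr
      AllocatorCache alloc_cache;
      InternalAllocatorCache internal_alloc_cache;
      DenseSlabAllocCache block_cache, sync_cache, clock_cache;
      DDPhysicalThread *dd_pt;
    };

    // Layout being restored: the same caches hang directly off ThreadState.
    struct ThreadState {
      // ...
      AllocatorCache alloc_cache;
      InternalAllocatorCache internal_alloc_cache;
      DDPhysicalThread *dd_pt;
      DenseSlabAllocCache block_cache, sync_cache, clock_cache;
      // ...
    };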

lib/tsan/CMakeLists.txt
@@ -41,7 +41,6 @@ set(TSAN_SOURCES
   rtl/tsan_rtl.cc
   rtl/tsan_rtl_mutex.cc
   rtl/tsan_rtl_report.cc
-  rtl/tsan_rtl_proc.cc
   rtl/tsan_rtl_thread.cc
   rtl/tsan_stack_trace.cc
   rtl/tsan_stat.cc

lib/tsan/go/build.bat
@@ -1,4 +1,4 @@
-type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_rtl_proc.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc ..\..\sanitizer_common\sanitizer_flag_parser.cc ..\..\sanitizer_common\sanitizer_symbolizer.cc > gotsan.cc
+type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc ..\..\sanitizer_common\sanitizer_flag_parser.cc ..\..\sanitizer_common\sanitizer_symbolizer.cc > gotsan.cc
 
 gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
 

lib/tsan/go/buildgo.sh
@@ -14,7 +14,6 @@ SRCS="
 	../rtl/tsan_rtl_mutex.cc
 	../rtl/tsan_rtl_report.cc
 	../rtl/tsan_rtl_thread.cc
-	../rtl/tsan_rtl_proc.cc
 	../rtl/tsan_stack_trace.cc
 	../rtl/tsan_stat.cc
 	../rtl/tsan_suppressions.cc
@@ -124,7 +123,7 @@ if [ "$SILENT" != "1" ]; then
 fi
 $CC $DIR/gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS
 
-$CC test.c $DIR/race_$SUFFIX.syso -m64 -g -o $DIR/test $OSLDFLAGS
+$CC test.c $DIR/race_$SUFFIX.syso -m64 -o $DIR/test $OSLDFLAGS
 
 export GORACE="exitcode=0 atexit_sleep_ms=0"
 if [ "$SILENT" != "1" ]; then

lib/tsan/go/test.c
@@ -13,21 +13,16 @@
 
 #include <stdio.h>
 
-void __tsan_init(void **thr, void **proc, void (*cb)(void*));
+void __tsan_init(void **thr, void (*cb)(void*));
 void __tsan_fini();
 void __tsan_map_shadow(void *addr, unsigned long size);
 void __tsan_go_start(void *thr, void **chthr, void *pc);
 void __tsan_go_end(void *thr);
-void __tsan_proc_create(void **pproc);
-void __tsan_proc_destroy(void *proc);
-void __tsan_proc_wire(void *proc, void *thr);
-void __tsan_proc_unwire(void *proc, void *thr);
 void __tsan_read(void *thr, void *addr, void *pc);
 void __tsan_write(void *thr, void *addr, void *pc);
 void __tsan_func_enter(void *thr, void *pc);
 void __tsan_func_exit(void *thr);
-void __tsan_malloc(void *thr, void *pc, void *p, unsigned long sz);
-void __tsan_free(void *proc, void *p, unsigned long sz);
+void __tsan_malloc(void *p, unsigned long sz);
 void __tsan_acquire(void *thr, void *addr);
 void __tsan_release(void *thr, void *addr);
 void __tsan_release_merge(void *thr, void *addr);
@@ -41,23 +36,18 @@ void barfoo() {}
 
 int main(void) {
   void *thr0 = 0;
-  void *proc0 = 0;
-  __tsan_init(&thr0, &proc0, symbolize_cb);
   char *buf = (char*)((unsigned long)buf0 + (64<<10) - 1 & ~((64<<10) - 1));
+  __tsan_malloc(buf, 10);
+  __tsan_init(&thr0, symbolize_cb);
   __tsan_map_shadow(buf, 4096);
-  __tsan_malloc(thr0, 0, buf, 10);
-  __tsan_free(proc0, buf, 10);
   __tsan_func_enter(thr0, (char*)&main + 1);
-  __tsan_malloc(thr0, 0, buf, 10);
+  __tsan_malloc(buf, 10);
   __tsan_release(thr0, buf);
   __tsan_release_merge(thr0, buf);
   void *thr1 = 0;
   __tsan_go_start(thr0, &thr1, (char*)&barfoo + 1);
   void *thr2 = 0;
   __tsan_go_start(thr0, &thr2, (char*)&barfoo + 1);
   __tsan_func_exit(thr0);
-  __tsan_proc_unwire(proc0, thr0);
-  __tsan_proc_wire(proc0, thr1);
   __tsan_func_enter(thr1, (char*)&foobar + 1);
-  __tsan_func_enter(thr1, (char*)&foobar + 1);
   __tsan_write(thr1, buf, (char*)&barfoo + 1);
@@ -65,16 +55,11 @@ int main(void) {
   __tsan_func_exit(thr1);
+  __tsan_func_exit(thr1);
   __tsan_go_end(thr1);
-  void *proc1 = 0;
-  __tsan_proc_create(&proc1);
-  __tsan_proc_wire(proc1, thr2);
   __tsan_func_enter(thr2, (char*)&foobar + 1);
   __tsan_read(thr2, buf, (char*)&barfoo + 1);
-  __tsan_free(proc1, buf, 10);
   __tsan_func_exit(thr2);
   __tsan_go_end(thr2);
-  __tsan_proc_destroy(proc0);
-  __tsan_proc_destroy(proc1);
   __tsan_func_exit(thr0);
   __tsan_fini();
   return 0;
 }
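
One behavioral detail worth noting in the restored test above: the shadow for the heap block is requested before the runtime is initialized, so the restored runtime-side __tsan_malloc must tolerate pre-init calls (see the if (!inited) return; guard in tsan_go.cc below). A minimal sketch of the restored startup order, copied from the test:

    void *thr0 = 0;
    __tsan_malloc(buf, 10);            // may arrive before the runtime is up
    __tsan_init(&thr0, symbolize_cb);  // back to one out-param, no Processor
    __tsan_map_shadow(buf, 4096);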

lib/tsan/go/tsan_go.cc
@@ -81,14 +81,11 @@ static ThreadState *AllocGoroutine() {
   return thr;
 }
 
-void __tsan_init(ThreadState **thrp,
-                 Processor **procp,
-                 void (*cb)(SymbolizeContext *cb)) {
+void __tsan_init(ThreadState **thrp, void (*cb)(SymbolizeContext *cb)) {
   symbolize_cb = cb;
   ThreadState *thr = AllocGoroutine();
   main_thr = *thrp = thr;
   Initialize(thr);
-  *procp = thr->proc;
   inited = true;
 }
 
@@ -143,52 +140,24 @@ void __tsan_func_exit(ThreadState *thr) {
   FuncExit(thr);
 }
 
-void __tsan_malloc(ThreadState *thr, uptr pc, uptr p, uptr sz) {
-  CHECK(inited);
-  if (thr && thr->proc)
-    ctx->metamap.AllocBlock(thr, pc, p, sz);
-  MemoryResetRange(thr, 0, p, sz);
-}
-
-void __tsan_free(Processor *proc, uptr p, uptr sz) {
-  ctx->metamap.FreeRange(proc, p, sz);
+void __tsan_malloc(void *p, uptr sz) {
+  if (!inited)
+    return;
+  MemoryResetRange(0, 0, (uptr)p, sz);
 }
 
 void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
   ThreadState *thr = AllocGoroutine();
   *pthr = thr;
   int goid = ThreadCreate(parent, (uptr)pc, 0, true);
-  Processor *proc = parent->proc;
-  ProcUnwire(proc, parent);
-  ProcWire(proc, thr);
   ThreadStart(thr, goid, 0);
-  ProcUnwire(proc, thr);
-  ProcWire(proc, parent);
 }
 
 void __tsan_go_end(ThreadState *thr) {
-  Processor *proc = thr->proc;
   ThreadFinish(thr);
-  ProcUnwire(proc, thr);
   internal_free(thr);
 }
 
-void __tsan_proc_create(Processor **pproc) {
-  *pproc = ProcCreate();
-}
-
-void __tsan_proc_destroy(Processor *proc) {
-  ProcDestroy(proc);
-}
-
-void __tsan_proc_wire(Processor *proc, ThreadState *thr) {
-  ProcWire(proc, thr);
-}
-
-void __tsan_proc_unwire(Processor *proc, ThreadState *thr) {
-  ProcUnwire(proc, thr);
-}
-
 void __tsan_acquire(ThreadState *thr, void *addr) {
   Acquire(thr, 0, (uptr)addr);
 }

lib/tsan/rtl/tsan_defs.h
@@ -149,7 +149,6 @@ struct MD5Hash {
 
 MD5Hash md5_hash(const void *data, uptr size);
 
-struct Processor;
 struct ThreadState;
 class ThreadContext;
 struct Context;

lib/tsan/rtl/tsan_interceptors.cc
@@ -805,7 +805,7 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
   if (sz != 0) {
     // If sz == 0, munmap will return EINVAL and don't unmap any memory.
     DontNeedShadowFor((uptr)addr, sz);
-    ctx->metamap.ResetRange(thr->proc, (uptr)addr, (uptr)sz);
+    ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
   }
   int res = REAL(munmap)(addr, sz);
   return res;
@@ -900,10 +900,7 @@ STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
 namespace __tsan {
 void DestroyThreadState() {
   ThreadState *thr = cur_thread();
-  Processor *proc = thr->proc;
   ThreadFinish(thr);
-  ProcUnwire(proc, thr);
-  ProcDestroy(proc);
   ThreadSignalContext *sctx = thr->signal_ctx;
   if (sctx) {
     thr->signal_ctx = 0;
@@ -954,8 +951,6 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
 #endif
     while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
       internal_sched_yield();
-    Processor *proc = ProcCreate();
-    ProcWire(proc, thr);
     ThreadStart(thr, tid, GetTid());
     atomic_store(&p->tid, 0, memory_order_release);
   }

lib/tsan/rtl/tsan_interface_java.cc
@@ -111,7 +111,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
   CHECK_GE(ptr, jctx->heap_begin);
   CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
 
-  ctx->metamap.FreeRange(thr->proc, ptr, size);
+  ctx->metamap.FreeRange(thr, pc, ptr, size);
 }
 
 void __tsan_java_move(jptr src, jptr dst, jptr size) {

lib/tsan/rtl/tsan_mman.cc
@@ -67,14 +67,14 @@ void InitializeAllocator() {
   allocator()->Init(common_flags()->allocator_may_return_null);
 }
 
-void AllocatorProcStart(Processor *proc) {
-  allocator()->InitCache(&proc->alloc_cache);
-  internal_allocator()->InitCache(&proc->internal_alloc_cache);
+void AllocatorThreadStart(ThreadState *thr) {
+  allocator()->InitCache(&thr->alloc_cache);
+  internal_allocator()->InitCache(&thr->internal_alloc_cache);
 }
 
-void AllocatorProcFinish(Processor *proc) {
-  allocator()->DestroyCache(&proc->alloc_cache);
-  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
+void AllocatorThreadFinish(ThreadState *thr) {
+  allocator()->DestroyCache(&thr->alloc_cache);
+  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
 }
 
 void AllocatorPrintStats() {
@@ -98,7 +98,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
 void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
     return allocator()->ReturnNullOrDie();
-  void *p = allocator()->Allocate(&thr->proc->alloc_cache, sz, align);
+  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
   if (p == 0)
     return 0;
   if (ctx && ctx->initialized)
@@ -120,7 +120,7 @@ void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
 void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
   if (ctx && ctx->initialized)
     OnUserFree(thr, pc, (uptr)p, true);
-  allocator()->Deallocate(&thr->proc->alloc_cache, p);
+  allocator()->Deallocate(&thr->alloc_cache, p);
   if (signal)
     SignalUnsafeCall(thr, pc);
 }
@@ -136,7 +136,7 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
 
 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
   CHECK_NE(p, (void*)0);
-  uptr sz = ctx->metamap.FreeBlock(thr->proc, p);
+  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
   DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
   if (write && thr->ignore_reads_and_writes == 0)
     MemoryRangeFreed(thr, pc, (uptr)p, sz);
@@ -187,7 +187,7 @@ void *internal_alloc(MBlockType typ, uptr sz) {
     thr->nomalloc = 0; // CHECK calls internal_malloc().
     CHECK(0);
   }
-  return InternalAlloc(sz, &thr->proc->internal_alloc_cache);
+  return InternalAlloc(sz, &thr->internal_alloc_cache);
 }
 
 void internal_free(void *p) {
@@ -196,7 +196,7 @@ void internal_free(void *p) {
     thr->nomalloc = 0; // CHECK calls internal_malloc().
     CHECK(0);
   }
-  InternalFree(p, &thr->proc->internal_alloc_cache);
+  InternalFree(p, &thr->internal_alloc_cache);
 }
 
 } // namespace __tsan
@@ -238,8 +238,8 @@ uptr __sanitizer_get_allocated_size(const void *p) {
 
 void __tsan_on_thread_idle() {
   ThreadState *thr = cur_thread();
-  allocator()->SwallowCache(&thr->proc->alloc_cache);
-  internal_allocator()->SwallowCache(&thr->proc->internal_alloc_cache);
-  ctx->metamap.OnProcIdle(thr->proc);
+  allocator()->SwallowCache(&thr->alloc_cache);
+  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+  ctx->metamap.OnThreadIdle(thr);
 }
 } // extern "C"
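
The mman changes above are all one pattern: allocator caches are reached as thr->alloc_cache and thr->internal_alloc_cache again, rather than through thr->proc. The idea behind such caches is unchanged by the revert; a generic, self-contained sketch of the thread-local-cache pattern (illustrative names only, not sanitizer_common's real API):

    #include <mutex>
    #include <vector>

    // Generic thread-cache pattern: each thread takes batches from a shared
    // pool and serves allocations lock-free from its private cache; an idle
    // hook returns ("swallows") the cache back into the pool.
    struct SharedPool {
      std::mutex mu;
      std::vector<void*> blocks;
    };

    struct ThreadCache {
      std::vector<void*> local;

      void *Allocate(SharedPool &pool) {
        if (local.empty()) {                 // refill: the only locking path
          std::lock_guard<std::mutex> l(pool.mu);
          while (!pool.blocks.empty() && local.size() < 64) {
            local.push_back(pool.blocks.back());
            pool.blocks.pop_back();
          }
        }
        if (local.empty()) return nullptr;   // pool exhausted
        void *p = local.back();
        local.pop_back();
        return p;
      }

      void Swallow(SharedPool &pool) {       // cf. __tsan_on_thread_idle above
        std::lock_guard<std::mutex> l(pool.mu);
        pool.blocks.insert(pool.blocks.end(), local.begin(), local.end());
        local.clear();
      }
    };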

lib/tsan/rtl/tsan_mman.h
@@ -21,8 +21,8 @@ const uptr kDefaultAlignment = 16;
 
 void InitializeAllocator();
 void ReplaceSystemMalloc();
-void AllocatorProcStart(Processor *proc);
-void AllocatorProcFinish(Processor *proc);
+void AllocatorThreadStart(ThreadState *thr);
+void AllocatorThreadFinish(ThreadState *thr);
 void AllocatorPrintStats();
 
 // For user allocations.
@@ -46,7 +46,6 @@ enum MBlockType {
   MBlockShadowStack,
   MBlockSync,
   MBlockClock,
-  MBlockProcessor,
   MBlockThreadContex,
   MBlockDeadInfo,
   MBlockRacyStacks,

lib/tsan/rtl/tsan_rtl.cc
@@ -329,10 +329,6 @@ void Initialize(ThreadState *thr) {
   InitializeAllocator();
   ReplaceSystemMalloc();
 #endif
-  if (common_flags()->detect_deadlocks)
-    ctx->dd = DDetector::Create(flags());
-  Processor *proc = ProcCreate();
-  ProcWire(proc, thr);
   InitializeInterceptors();
   CheckShadowMapping();
   InitializePlatform();
@@ -355,6 +351,8 @@ void Initialize(ThreadState *thr) {
   SetSandboxingCallback(StopBackgroundThread);
 #endif
 #endif
+  if (common_flags()->detect_deadlocks)
+    ctx->dd = DDetector::Create(flags());
 
   VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

lib/tsan/rtl/tsan_rtl.h
@@ -325,26 +325,6 @@ struct JmpBuf {
   uptr *shadow_stack_pos;
 };
 
-// A Processor represents a physical thread, or a P for Go.
-// It is used to store internal resources like allocate cache, and does not
-// participate in race-detection logic (invisible to end user).
-// In C++ it is tied to an OS thread just like ThreadState, however ideally
-// it should be tied to a CPU (this way we will have fewer allocator caches).
-// In Go it is tied to a P, so there are significantly fewer Processor's than
-// ThreadState's (which are tied to Gs).
-// A ThreadState must be wired with a Processor to handle events.
-struct Processor {
-  ThreadState *thr; // currently wired thread, or nullptr
-#ifndef SANITIZER_GO
-  AllocatorCache alloc_cache;
-  InternalAllocatorCache internal_alloc_cache;
-#endif
-  DenseSlabAllocCache block_cache;
-  DenseSlabAllocCache sync_cache;
-  DenseSlabAllocCache clock_cache;
-  DDPhysicalThread *dd_pt;
-};
-
 // This struct is stored in TLS.
 struct ThreadState {
   FastState fast_state;
@@ -380,6 +360,8 @@ struct ThreadState {
   MutexSet mset;
   ThreadClock clock;
 #ifndef SANITIZER_GO
+  AllocatorCache alloc_cache;
+  InternalAllocatorCache internal_alloc_cache;
   Vector<JmpBuf> jmp_bufs;
   int ignore_interceptors;
 #endif
@@ -403,14 +385,16 @@ struct ThreadState {
 #if SANITIZER_DEBUG && !SANITIZER_GO
   InternalDeadlockDetector internal_deadlock_detector;
 #endif
+  DDPhysicalThread *dd_pt;
   DDLogicalThread *dd_lt;
 
-  // Current wired Processor, or nullptr. Required to handle any events.
-  Processor *proc;
-
   atomic_uintptr_t in_signal_handler;
   ThreadSignalContext *signal_ctx;
 
+  DenseSlabAllocCache block_cache;
+  DenseSlabAllocCache sync_cache;
+  DenseSlabAllocCache clock_cache;
+
 #ifndef SANITIZER_GO
   u32 last_sleep_stack_id;
   ThreadClock last_sleep_clock;
@@ -699,11 +683,6 @@ void ThreadSetName(ThreadState *thr, const char *name);
 int ThreadCount(ThreadState *thr);
 void ProcessPendingSignals(ThreadState *thr);
 
-Processor *ProcCreate();
-void ProcDestroy(Processor *proc);
-void ProcWire(Processor *proc, ThreadState *thr);
-void ProcUnwire(Processor *proc, ThreadState *thr);
-
 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                  bool rw, bool recursive, bool linker_init);
 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);

lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -32,7 +32,7 @@ struct Callback : DDCallback {
   Callback(ThreadState *thr, uptr pc)
       : thr(thr)
       , pc(pc) {
-    DDCallback::pt = thr->proc->dd_pt;
+    DDCallback::pt = thr->dd_pt;
     DDCallback::lt = thr->dd_lt;
   }
 
@@ -114,7 +114,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
   u64 mid = s->GetId();
   u32 last_lock = s->last_lock;
   if (!unlock_locked)
-    s->Reset(thr->proc); // must not reset it before the report is printed
+    s->Reset(thr); // must not reset it before the report is printed
   s->mtx.Unlock();
   if (unlock_locked) {
     ThreadRegistryLock l(ctx->thread_registry);
@@ -132,7 +132,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
   if (unlock_locked) {
     SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
     if (s != 0) {
-      s->Reset(thr->proc);
+      s->Reset(thr);
       s->mtx.Unlock();
     }
   }
@@ -426,7 +426,7 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
   if (thr->ignore_sync)
     return;
   thr->clock.set(thr->fast_state.epoch());
-  thr->clock.acquire(&thr->proc->clock_cache, c);
+  thr->clock.acquire(&thr->clock_cache, c);
   StatInc(thr, StatSyncAcquire);
 }
 
@@ -435,7 +435,7 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
     return;
   thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.release(&thr->proc->clock_cache, c);
+  thr->clock.release(&thr->clock_cache, c);
   StatInc(thr, StatSyncRelease);
 }
 
@@ -444,7 +444,7 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
     return;
   thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.ReleaseStore(&thr->proc->clock_cache, c);
+  thr->clock.ReleaseStore(&thr->clock_cache, c);
   StatInc(thr, StatSyncRelease);
 }
 
@@ -453,7 +453,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
     return;
   thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.acq_rel(&thr->proc->clock_cache, c);
+  thr->clock.acq_rel(&thr->clock_cache, c);
   StatInc(thr, StatSyncAcquire);
   StatInc(thr, StatSyncRelease);
 }
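
The four *Impl functions above differ only in which clock primitive they forward to, now against thr->clock_cache. For readers unfamiliar with the underlying algorithm, here is a toy vector-clock sketch of what acquire and release mean (illustration only; TSan's ThreadClock/SyncClock are far more compact and need the cache argument being threaded through above):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    using Clock = std::vector<unsigned long>;  // one slot per thread

    // acquire: join the sync object's clock into the thread's clock, so the
    // thread now "knows about" everything released into that object.
    void acquire(Clock &thr, const Clock &sync) {
      if (sync.size() > thr.size()) thr.resize(sync.size());
      for (std::size_t i = 0; i < sync.size(); i++)
        thr[i] = std::max(thr[i], sync[i]);
    }

    // release: publish the thread's clock into the sync object by joining.
    void release(const Clock &thr, Clock &sync) {
      if (thr.size() > sync.size()) sync.resize(thr.size());
      for (std::size_t i = 0; i < thr.size(); i++)
        sync[i] = std::max(sync[i], thr[i]);
    }

In these terms, ReleaseStore overwrites the sync clock instead of joining into it, and acq_rel performs an acquire followed by a release.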

lib/tsan/rtl/tsan_rtl_proc.cc (deleted by this commit)
@@ -1,61 +0,0 @@
-//===-- tsan_rtl_proc.cc ------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_common/sanitizer_placement_new.h"
-#include "tsan_rtl.h"
-#include "tsan_mman.h"
-#include "tsan_flags.h"
-
-namespace __tsan {
-
-Processor *ProcCreate() {
-  void *mem = internal_alloc(MBlockProcessor, sizeof(Processor));
-  internal_memset(mem, 0, sizeof(Processor));
-  Processor *proc = new(mem) Processor;
-  proc->thr = nullptr;
-#ifndef SANITIZER_GO
-  AllocatorProcStart(proc);
-#endif
-  if (common_flags()->detect_deadlocks)
-    proc->dd_pt = ctx->dd->CreatePhysicalThread();
-  return proc;
-}
-
-void ProcDestroy(Processor *proc) {
-  CHECK_EQ(proc->thr, nullptr);
-#ifndef SANITIZER_GO
-  AllocatorProcFinish(proc);
-#endif
-  ctx->clock_alloc.FlushCache(&proc->clock_cache);
-  ctx->metamap.OnProcIdle(proc);
-  if (common_flags()->detect_deadlocks)
-    ctx->dd->DestroyPhysicalThread(proc->dd_pt);
-  proc->~Processor();
-  internal_free(proc);
-}
-
-void ProcWire(Processor *proc, ThreadState *thr) {
-  CHECK_EQ(thr->proc, nullptr);
-  CHECK_EQ(proc->thr, nullptr);
-  thr->proc = proc;
-  proc->thr = thr;
-}
-
-void ProcUnwire(Processor *proc, ThreadState *thr) {
-  CHECK_EQ(thr->proc, proc);
-  CHECK_EQ(proc->thr, thr);
-  thr->proc = nullptr;
-  proc->thr = nullptr;
-}
-
-} // namespace __tsan
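
Two idioms in the deleted file are worth a note: objects are built with placement new on top of the runtime's internal allocator and torn down with an explicit destructor call, and the wire/unwire pair enforces a strict one-to-one Processor-to-thread pairing via CHECK_EQ on both pointers. A standalone sketch of the allocation idiom (using malloc in place of internal_alloc):

    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct Widget { int x = 0; };

    // Construct in raw storage, as the deleted ProcCreate did.
    Widget *WidgetCreate() {
      void *mem = std::malloc(sizeof(Widget));
      std::memset(mem, 0, sizeof(Widget));
      return new (mem) Widget;  // placement new: no allocation happens here
    }

    // Explicit destructor call plus raw free, as the deleted ProcDestroy did.
    void WidgetDestroy(Widget *w) {
      w->~Widget();
      std::free(w);
    }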

lib/tsan/rtl/tsan_rtl_thread.cc
@@ -42,7 +42,7 @@ void ThreadContext::OnDead() {
 void ThreadContext::OnJoined(void *arg) {
   ThreadState *caller_thr = static_cast<ThreadState *>(arg);
   AcquireImpl(caller_thr, 0, &sync);
-  sync.Reset(&caller_thr->proc->clock_cache);
+  sync.Reset(&caller_thr->clock_cache);
 }
 
 struct OnCreatedArgs {
@@ -74,7 +74,7 @@ void ThreadContext::OnReset() {
 
 void ThreadContext::OnDetached(void *arg) {
   ThreadState *thr1 = static_cast<ThreadState*>(arg);
-  sync.Reset(&thr1->proc->clock_cache);
+  sync.Reset(&thr1->clock_cache);
 }
 
 struct OnStartedArgs {
@@ -106,8 +106,13 @@ void ThreadContext::OnStarted(void *arg) {
   thr->shadow_stack_pos = thr->shadow_stack;
   thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
 #endif
-  if (common_flags()->detect_deadlocks)
+#ifndef SANITIZER_GO
+  AllocatorThreadStart(thr);
+#endif
+  if (common_flags()->detect_deadlocks) {
+    thr->dd_pt = ctx->dd->CreatePhysicalThread();
     thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+  }
   thr->fast_state.SetHistorySize(flags()->history_size);
   // Commit switch to the new part of the trace.
   // TraceAddEvent will reset stack0/mset0 in the new part for us.
@@ -116,7 +121,7 @@ void ThreadContext::OnStarted(void *arg) {
   thr->fast_synch_epoch = epoch0;
   AcquireImpl(thr, 0, &sync);
   StatInc(thr, StatSyncAcquire);
-  sync.Reset(&thr->proc->clock_cache);
+  sync.Reset(&thr->clock_cache);
   thr->is_inited = true;
   DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
           "tls_addr=%zx tls_size=%zx\n",
@@ -133,8 +138,15 @@ void ThreadContext::OnFinished() {
   }
   epoch1 = thr->fast_state.epoch();
 
-  if (common_flags()->detect_deadlocks)
+  if (common_flags()->detect_deadlocks) {
+    ctx->dd->DestroyPhysicalThread(thr->dd_pt);
     ctx->dd->DestroyLogicalThread(thr->dd_lt);
+  }
+  ctx->clock_alloc.FlushCache(&thr->clock_cache);
+  ctx->metamap.OnThreadIdle(thr);
+#ifndef SANITIZER_GO
+  AllocatorThreadFinish(thr);
+#endif
   thr->~ThreadState();
 #if TSAN_COLLECT_STATS
   StatAggregate(ctx->stat, thr->stat);

lib/tsan/rtl/tsan_sync.cc
@@ -36,7 +36,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
   DDMutexInit(thr, pc, this);
 }
 
-void SyncVar::Reset(Processor *proc) {
+void SyncVar::Reset(ThreadState *thr) {
   uid = 0;
   creation_stack_id = 0;
   owner_tid = kInvalidTid;
@@ -47,12 +47,12 @@ void SyncVar::Reset(Processor *proc) {
   is_broken = 0;
   is_linker_init = 0;
 
-  if (proc == 0) {
+  if (thr == 0) {
     CHECK_EQ(clock.size(), 0);
     CHECK_EQ(read_clock.size(), 0);
   } else {
-    clock.Reset(&proc->clock_cache);
-    read_clock.Reset(&proc->clock_cache);
+    clock.Reset(&thr->clock_cache);
+    read_clock.Reset(&thr->clock_cache);
   }
 }
 
@@ -61,7 +61,7 @@ MetaMap::MetaMap() {
 }
 
 void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
-  u32 idx = block_alloc_.Alloc(&thr->proc->block_cache);
+  u32 idx = block_alloc_.Alloc(&thr->block_cache);
   MBlock *b = block_alloc_.Map(idx);
   b->siz = sz;
   b->tid = thr->tid;
@@ -71,16 +71,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
   *meta = idx | kFlagBlock;
 }
 
-uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
   MBlock* b = GetBlock(p);
   if (b == 0)
     return 0;
   uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
-  FreeRange(proc, p, sz);
+  FreeRange(thr, pc, p, sz);
   return sz;
 }
 
-bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
   bool has_something = false;
   u32 *meta = MemToMeta(p);
   u32 *end = MemToMeta(p + sz);
@@ -96,14 +96,14 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
       has_something = true;
       while (idx != 0) {
         if (idx & kFlagBlock) {
-          block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
+          block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
           break;
         } else if (idx & kFlagSync) {
           DCHECK(idx & kFlagSync);
           SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
           u32 next = s->next;
-          s->Reset(proc);
-          sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
+          s->Reset(thr);
+          sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
           idx = next;
         } else {
           CHECK(0);
@@ -119,24 +119,24 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
 // which can be huge. The function probes pages one-by-one until it finds a page
 // without meta objects, at this point it stops freeing meta objects. Because
 // thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
   const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
   const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
   if (sz <= 4 * kPageSize) {
     // If the range is small, just do the normal free procedure.
-    FreeRange(proc, p, sz);
+    FreeRange(thr, pc, p, sz);
     return;
   }
   // First, round both ends of the range to page size.
   uptr diff = RoundUp(p, kPageSize) - p;
   if (diff != 0) {
-    FreeRange(proc, p, diff);
+    FreeRange(thr, pc, p, diff);
     p += diff;
    sz -= diff;
   }
   diff = p + sz - RoundDown(p + sz, kPageSize);
   if (diff != 0) {
-    FreeRange(proc, p + sz - diff, diff);
+    FreeRange(thr, pc, p + sz - diff, diff);
     sz -= diff;
   }
   // Now we must have a non-empty page-aligned range.
@@ -147,7 +147,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
   const uptr sz0 = sz;
   // Probe start of the range.
   while (sz > 0) {
-    bool has_something = FreeRange(proc, p, kPageSize);
+    bool has_something = FreeRange(thr, pc, p, kPageSize);
     p += kPageSize;
     sz -= kPageSize;
     if (!has_something)
@@ -155,7 +155,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
   }
   // Probe end of the range.
   while (sz > 0) {
-    bool has_something = FreeRange(proc, p - kPageSize, kPageSize);
+    bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
     sz -= kPageSize;
     if (!has_something)
       break;
@@ -210,8 +210,8 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
     SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
     if (s->addr == addr) {
       if (myidx != 0) {
-        mys->Reset(thr->proc);
-        sync_alloc_.Free(&thr->proc->sync_cache, myidx);
+        mys->Reset(thr);
+        sync_alloc_.Free(&thr->sync_cache, myidx);
       }
       if (write_lock)
         s->mtx.Lock();
@@ -230,7 +230,7 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
 
     if (myidx == 0) {
       const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
-      myidx = sync_alloc_.Alloc(&thr->proc->sync_cache);
+      myidx = sync_alloc_.Alloc(&thr->sync_cache);
       mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
     }
@@ -279,9 +279,9 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
   }
 }
 
-void MetaMap::OnProcIdle(Processor *proc) {
-  block_alloc_.FlushCache(&proc->block_cache);
-  sync_alloc_.FlushCache(&proc->sync_cache);
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+  block_alloc_.FlushCache(&thr->block_cache);
+  sync_alloc_.FlushCache(&thr->sync_cache);
 }
 
 } // namespace __tsan

lib/tsan/rtl/tsan_sync.h
@@ -47,7 +47,7 @@ struct SyncVar {
   SyncClock clock;
 
   void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
-  void Reset(Processor *proc);
+  void Reset(ThreadState *thr);
 
   u64 GetId() const {
     // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
@@ -72,9 +72,9 @@ class MetaMap {
   MetaMap();
 
   void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
-  uptr FreeBlock(Processor *proc, uptr p);
-  bool FreeRange(Processor *proc, uptr p, uptr sz);
-  void ResetRange(Processor *proc, uptr p, uptr sz);
+  uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
+  bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
   MBlock* GetBlock(uptr p);
 
   SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
@@ -83,7 +83,7 @@ class MetaMap {
 
   void MoveMemory(uptr src, uptr dst, uptr sz);
 
-  void OnProcIdle(Processor *proc);
+  void OnThreadIdle(ThreadState *thr);
 
  private:
   static const u32 kFlagMask = 3u << 30;

lib/tsan/tests/unit/tsan_sync_test.cc
@@ -25,7 +25,7 @@ TEST(MetaMap, Basic) {
   EXPECT_NE(mb, (MBlock*)0);
   EXPECT_EQ(mb->siz, 1 * sizeof(u64));
   EXPECT_EQ(mb->tid, thr->tid);
-  uptr sz = m->FreeBlock(thr->proc, (uptr)&block[0]);
+  uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]);
   EXPECT_EQ(sz, 1 * sizeof(u64));
   mb = m->GetBlock((uptr)&block[0]);
   EXPECT_EQ(mb, (MBlock*)0);
@@ -41,7 +41,7 @@ TEST(MetaMap, FreeRange) {
   EXPECT_EQ(mb1->siz, 1 * sizeof(u64));
   MBlock *mb2 = m->GetBlock((uptr)&block[1]);
   EXPECT_EQ(mb2->siz, 3 * sizeof(u64));
-  m->FreeRange(thr->proc, (uptr)&block[0], 4 * sizeof(u64));
+  m->FreeRange(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
   mb1 = m->GetBlock((uptr)&block[0]);
   EXPECT_EQ(mb1, (MBlock*)0);
   mb2 = m->GetBlock((uptr)&block[1]);
@@ -63,12 +63,12 @@ TEST(MetaMap, Sync) {
   EXPECT_NE(s2, (SyncVar*)0);
   EXPECT_EQ(s2->addr, (uptr)&block[1]);
   s2->mtx.ReadUnlock();
-  m->FreeBlock(thr->proc, (uptr)&block[0]);
+  m->FreeBlock(thr, 0, (uptr)&block[0]);
   s1 = m->GetIfExistsAndLock((uptr)&block[0]);
   EXPECT_EQ(s1, (SyncVar*)0);
   s2 = m->GetIfExistsAndLock((uptr)&block[1]);
   EXPECT_EQ(s2, (SyncVar*)0);
-  m->OnProcIdle(thr->proc);
+  m->OnThreadIdle(thr);
 }
 
 TEST(MetaMap, MoveMemory) {
@@ -105,7 +105,7 @@ TEST(MetaMap, MoveMemory) {
   EXPECT_NE(s2, (SyncVar*)0);
   EXPECT_EQ(s2->addr, (uptr)&block2[1]);
   s2->mtx.Unlock();
-  m->FreeRange(thr->proc, (uptr)&block2[0], 4 * sizeof(u64));
+  m->FreeRange(thr, 0, (uptr)&block2[0], 4 * sizeof(u64));
 }
 
 TEST(MetaMap, ResetSync) {
@@ -114,9 +114,9 @@ TEST(MetaMap, ResetSync) {
   u64 block[1] = {}; // fake malloc block
   m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
   SyncVar *s = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
-  s->Reset(thr->proc);
+  s->Reset(thr);
   s->mtx.Unlock();
-  uptr sz = m->FreeBlock(thr->proc, (uptr)&block[0]);
+  uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]);
   EXPECT_EQ(sz, 1 * sizeof(u64));
 }
 