tsan: new MemoryAccess interface

Currently we have a MemoryAccess function that accepts
"bool kAccessIsWrite, bool kIsAtomic" flags and 4 wrappers:
MemoryRead/MemoryWrite/MemoryReadAtomic/MemoryWriteAtomic.

Such a scheme with bool flags is not particularly scalable/extensible.
Because of that we do not have Read/Write wrappers for UnalignedMemoryAccess,
and "true, false" or "false, true" at call sites is not very readable.

Moreover, the new tsan runtime will introduce more flags
(e.g. move "freed" and "vptr access" to memory access flags).
We can't have 16 wrappers, and each flag also takes a whole
64-bit register for non-inlined calls.

Introduce an AccessType enum that contains a bit mask of
read/write, atomic/non-atomic, and later free/non-free and
vptr/non-vptr.
Such a scheme is more scalable, more readable, more efficient
(it does not consume multiple registers for these flags during calls),
and it allows covering the unaligned and range variations of the
memory access functions as well.
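
A minimal standalone sketch of the scheme (the AccessType typedef and the
flag values match the header diff below; the MemoryAccess body here is only
a stand-in that decodes the mask):

  #include <cstdint>
  #include <cstdio>

  typedef std::uintptr_t uptr;  // stand-in for the sanitizer-internal uptr
  typedef uptr AccessType;

  enum : AccessType {
    kAccessWrite = 0,       // default: no bit set
    kAccessRead = 1 << 0,
    kAccessAtomic = 1 << 1,
  };

  // One entry point instead of 4 wrappers; all flags travel in one register.
  void MemoryAccess(uptr pc, uptr addr, uptr size, AccessType typ) {
    bool is_write = !(typ & kAccessRead);
    bool is_atomic = typ & kAccessAtomic;
    std::printf("addr=%zx size=%zu write=%d atomic=%d\n", (size_t)addr,
                (size_t)size, is_write, is_atomic);
  }

  int main() {
    MemoryAccess(0, 0x1000, 8, kAccessRead | kAccessAtomic);  // atomic read
    MemoryAccess(0, 0x2000, 1, kAccessWrite);                 // plain write
  }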

Also switch from size log to just size.
The new tsan runtime won't have the limitation of supporting
only 1/2/4/8 access sizes, so we don't need the logarithms.
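
For reference, the size-log constants used by the old interface encode
log2 of the byte size (values as declared in the header diff below):

  const int kSizeLog1 = 0;  // 1-byte access
  const int kSizeLog2 = 1;  // 2-byte access
  const int kSizeLog4 = 2;  // 4-byte access
  const int kSizeLog8 = 3;  // 8-byte access; other sizes are not representable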

Also add an inline thunk that converts the new interface to the old one.
For inlined calls it should not add any overhead because
all flags and the size can be computed at compile time.
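
For example, for an instrumented 1-byte read the thunk (see the header
diff below) should reduce as follows once inlined:

  // What the call site emits with the new interface:
  MemoryAccess(thr, pc, addr, 1, kAccessRead);
  // What remains after inlining: size and flags are compile-time
  // constants, so the switch and the bit tests fold away, leaving
  // the equivalent old-style call:
  MemoryAccess(thr, pc, addr, kSizeLog1, /*kAccessIsWrite=*/false,
               /*kIsAtomic=*/false);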

Reviewed By: vitalybuka, melver

Differential Revision: https://reviews.llvm.org/D107276
Author: Dmitry Vyukov
Date:   2021-08-02 11:04:43 +02:00
commit 831910c5c4
parent 0156f91f3b

10 changed files with 118 additions and 96 deletions


@@ -167,25 +167,25 @@ void __tsan_map_shadow(uptr addr, uptr size) {
 }

 void __tsan_read(ThreadState *thr, void *addr, void *pc) {
-  MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
 }

 void __tsan_read_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
   if (callpc != 0)
     FuncEntry(thr, callpc);
-  MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
   if (callpc != 0)
     FuncExit(thr);
 }

 void __tsan_write(ThreadState *thr, void *addr, void *pc) {
-  MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
 }

 void __tsan_write_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
   if (callpc != 0)
     FuncEntry(thr, callpc);
-  MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
   if (callpc != 0)
     FuncExit(thr);
 }


@@ -57,16 +57,14 @@ uptr TagFromShadowStackFrame(uptr pc) {

 #if !SANITIZER_GO
-typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessType typ) {
   CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
   ThreadState *thr = cur_thread();
   if (caller_pc) FuncEntry(thr, caller_pc);
   InsertShadowStackFrameForTag(thr, (uptr)tag);
   bool in_ignored_lib;
-  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
-    access(thr, CALLERPC, (uptr)addr, kSizeLog1);
-  }
+  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+    MemoryAccess(thr, CALLERPC, (uptr)addr, 1, typ);
   FuncExit(thr);
   if (caller_pc) FuncExit(thr);
 }
@@ -111,12 +109,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessRead);
 }

 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessWrite);
 }
 }  // extern "C"


@@ -115,7 +115,7 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
     MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
   } else {
     // See the dup-related comment in FdClose.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
 }
@@ -163,7 +163,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Acquire(thr, pc, (uptr)s);
 }
@@ -174,7 +174,7 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Release(thr, pc, (uptr)s);
 }
@@ -184,7 +184,7 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) {
   if (bogusfd(fd))
     return;
   FdDesc *d = fddesc(thr, pc, fd);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
 }

 void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
@@ -194,7 +194,7 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
   FdDesc *d = fddesc(thr, pc, fd);
   if (write) {
     // To catch races between fd usage and close.
-    MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
   } else {
     // This path is used only by dup2/dup3 calls.
     // We do read instead of write because there is a number of legitimate
@@ -204,7 +204,7 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
     // 2. Some daemons dup /dev/null in place of stdin/stdout.
     // On the other hand we have not seen cases when write here catches real
    // bugs.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
   // We need to clear it, because if we do not intercept any call out there
   // that creates fd, we will hit false postives.
@@ -228,7 +228,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
     return;
   // Ignore the case when user dups not yet connected socket.
   FdDesc *od = fddesc(thr, pc, oldfd);
-  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
   FdClose(thr, pc, newfd, write);
   init(thr, pc, newfd, ref(od->sync), write);
 }


@@ -1445,14 +1445,14 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_init)(b, a, count);
   return res;
 }

 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_destroy)(b);
   return res;
 }
@@ -1460,9 +1460,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
   Release(thr, pc, (uptr)b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   int res = REAL(pthread_barrier_wait)(b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
     Acquire(thr, pc, (uptr)b);
   }


@@ -30,57 +30,65 @@ void __tsan_flush_memory() {
 }

 void __tsan_read16(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
 }

 void __tsan_write16(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
 }

 void __tsan_read16_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
 }

 void __tsan_write16_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
 }

 // __tsan_unaligned_read/write calls are emitted by compiler.

 void __tsan_unaligned_read2(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
 }

 void __tsan_unaligned_read4(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
 }

 void __tsan_unaligned_read8(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
 }

 void __tsan_unaligned_read16(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, kAccessRead);
 }

 void __tsan_unaligned_write2(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
 }

 void __tsan_unaligned_write4(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
 }

 void __tsan_unaligned_write8(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
 }

 void __tsan_unaligned_write16(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, kAccessWrite);
 }

 // __sanitizer_unaligned_load/store are for user instrumentation.


@@ -161,16 +161,16 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
 }
 #endif

-template<typename T>
-static int SizeLog() {
+template <typename T>
+static int AccessSize() {
   if (sizeof(T) <= 1)
-    return kSizeLog1;
+    return 1;
   else if (sizeof(T) <= 2)
-    return kSizeLog2;
+    return 2;
   else if (sizeof(T) <= 4)
-    return kSizeLog4;
+    return 4;
   else
-    return kSizeLog8;
+    return 8;
   // For 16-byte atomics we also use 8-byte memory access,
   // this leads to false negatives only in very obscure cases.
 }
@@ -224,7 +224,8 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo)) {
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+                 kAccessRead | kAccessAtomic);
     return NoTsanAtomicLoad(a, mo);
   }
   // Don't create sync object if it does not exist yet. For example, an atomic
@@ -238,7 +239,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
     // of the value and the clock we acquire.
     v = NoTsanAtomicLoad(a, mo);
   }
-  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
   return v;
 }
@@ -258,7 +259,7 @@ template <typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                         morder mo) {
   CHECK(IsStoreOrder(mo));
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -279,7 +280,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,

 template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   if (LIKELY(mo == mo_relaxed))
     return F(a, v);
   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
@@ -404,7 +405,7 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
   // (mo_relaxed) when those are used.
   CHECK(IsLoadOrder(fmo));

-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
     T cc = *c;
     T pr = func_cas(a, cc, v);


@@ -19,67 +19,67 @@
 using namespace __tsan;

 void __tsan_read1(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
 }

 void __tsan_read2(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
 }

 void __tsan_read4(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
 }

 void __tsan_read8(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
 }

 void __tsan_write1(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
 }

 void __tsan_write2(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
 }

 void __tsan_write4(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
 }

 void __tsan_write8(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
 }

 void __tsan_read1_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead);
 }

 void __tsan_read2_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessRead);
 }

 void __tsan_read4_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessRead);
 }

 void __tsan_read8_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessRead);
 }

 void __tsan_write1_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessWrite);
 }

 void __tsan_write2_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessWrite);
 }

 void __tsan_write4_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessWrite);
 }

 void __tsan_write8_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessWrite);
 }

 void __tsan_vptr_update(void **vptr_p, void *new_val) {
@@ -87,7 +87,7 @@ void __tsan_vptr_update(void **vptr_p, void *new_val) {
   if (*vptr_p != new_val) {
     ThreadState *thr = cur_thread();
     thr->is_vptr_access = true;
-    MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+    MemoryAccess(thr, CALLERPC, (uptr)vptr_p, 8, kAccessWrite);
     thr->is_vptr_access = false;
   }
 }
@@ -96,7 +96,7 @@ void __tsan_vptr_read(void **vptr_p) {
   CHECK_EQ(sizeof(vptr_p), 8);
   ThreadState *thr = cur_thread();
   thr->is_vptr_access = true;
-  MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+  MemoryAccess(thr, CALLERPC, (uptr)vptr_p, 8, kAccessRead);
   thr->is_vptr_access = false;
 }


@@ -733,8 +733,11 @@ void MemoryAccessImpl1(ThreadState *thr, uptr addr,
   return;
 }

-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
-                           int size, bool kAccessIsWrite, bool kIsAtomic) {
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                           AccessType typ) {
+  DCHECK(!(typ & kAccessAtomic));
+  const bool kAccessIsWrite = !(typ & kAccessRead);
+  const bool kIsAtomic = false;
   while (size) {
     int size1 = 1;
     int kAccessSizeLog = kSizeLog1;


@@ -692,6 +692,14 @@ int Finalize(ThreadState *thr);
 void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

+typedef uptr AccessType;
+
+enum : AccessType {
+  kAccessWrite = 0,
+  kAccessRead = 1 << 0,
+  kAccessAtomic = 1 << 1,
+};
+
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                   int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
 void MemoryAccessImpl(ThreadState *thr, uptr addr,
@@ -701,32 +709,36 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                        uptr size, bool is_write);
 void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
                            uptr size, uptr step, bool is_write);
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
-                           int size, bool kAccessIsWrite, bool kIsAtomic);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                           AccessType typ);

 const int kSizeLog1 = 0;
 const int kSizeLog2 = 1;
 const int kSizeLog4 = 2;
 const int kSizeLog8 = 3;

-void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
-                              uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
-}
-
-void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
-                               uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
-}
-
-void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
-                                    uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
-}
-
-void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
-                                     uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+ALWAYS_INLINE
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                  AccessType typ) {
+  int size_log;
+  switch (size) {
+    case 1:
+      size_log = kSizeLog1;
+      break;
+    case 2:
+      size_log = kSizeLog2;
+      break;
+    case 4:
+      size_log = kSizeLog4;
+      break;
+    default:
+      DCHECK_EQ(size, 8);
+      size_log = kSizeLog8;
+      break;
+  }
+  bool is_write = !(typ & kAccessRead);
+  bool is_atomic = typ & kAccessAtomic;
+  MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
 }

 void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);


@@ -68,7 +68,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
-    MemoryWrite(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
     thr->is_freeing = false;
   }
   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
@@ -135,7 +135,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   if (IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
-    MemoryWrite(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
     thr->is_freeing = false;
   }
   // s will be destroyed and freed in MetaMap::FreeBlock.
@@ -166,7 +166,7 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
   else
     rec = 1;
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool pre_lock = false;
   bool first = false;
@@ -216,7 +216,7 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_unlock = false;
   int rec = 0;
@@ -274,7 +274,7 @@ void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_lock = false;
   bool pre_lock = false;
@@ -314,7 +314,7 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_unlock = false;
   {
@@ -347,7 +347,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_unlock = false;
   {