sanitizer_common: prefix thread-safety macros with SANITIZER_

Currently we use very common names for macros like ACQUIRE/RELEASE,
which cause conflicts with system headers.
Prefix all macros with SANITIZER_ to avoid conflicts.

Reviewed By: vitalybuka

Differential Revision: https://reviews.llvm.org/D116652
Dmitry Vyukov 2022-01-05 13:35:13 +01:00
parent 21babe4db3
commit 765921de5b
28 changed files with 133 additions and 121 deletions
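
For context, here is a minimal standalone sketch (not part of the patch) of the conflict class the rename eliminates. The 0x17 value and the variable name are illustrative; on Linux the concrete offender was <scsi/scsi.h>, see the #undef RELEASE workaround removed from sanitizer_platform_limits_posix.cpp below. The SANITIZER_* definitions are copied from the new sanitizer_thread_safety.h in this patch.

// A system header defines RELEASE as an ordinary object-like macro.
#define RELEASE 0x17  // illustrative value, e.g. a SCSI opcode

// Previously sanitizer_thread_safety.h also defined RELEASE(...), so including
// both headers produced macro-redefinition warnings and required workarounds
// such as `#undef RELEASE` before including sanitizer headers.

// With the prefix there is nothing left to collide with or to undef:
#if defined(__clang__)
#  define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
#else
#  define SANITIZER_THREAD_ANNOTATION(x)
#endif
#define SANITIZER_RELEASE(...) \
  SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))

int scsi_release_opcode = RELEASE;  // still expands to 0x17, unaffected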


@@ -840,12 +840,12 @@ struct Allocator {
quarantine.PrintStats();
}
void ForceLock() ACQUIRE(fallback_mutex) {
void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
allocator.ForceLock();
fallback_mutex.Lock();
}
void ForceUnlock() RELEASE(fallback_mutex) {
void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
@@ -1054,9 +1054,11 @@ uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
instance.ForceLock();
}
void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
instance.ForceUnlock();
}


@@ -322,14 +322,14 @@ void InitShadow() {
THREADLOCAL int in_loader;
Mutex shadow_update_lock;
void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
void EnterLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (in_loader == 0) {
shadow_update_lock.Lock();
}
++in_loader;
}
void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
void ExitLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
CHECK(in_loader > 0);
--in_loader;
UpdateShadow();


@@ -230,8 +230,8 @@ void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
struct ScopedStopTheWorldLock {
ScopedStopTheWorldLock() {


@@ -524,12 +524,12 @@ struct Allocator {
void PrintStats() { allocator.PrintStats(); }
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
allocator.ForceLock();
fallback_mutex.Lock();
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}


@@ -201,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
}
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
void AddrHashMap<T, kSize>::acquire(Handle *h)
SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -330,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
}
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
void AddrHashMap<T, kSize>::release(Handle *h)
SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (!h->cell_)
return;
Bucket *b = h->bucket_;


@@ -126,12 +126,12 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
RawInternalFree(addr, cache);
}
void InternalAllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
internal_allocator_cache_mu.Lock();
internal_allocator()->ForceLock();
}
void InternalAllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
internal_allocator()->ForceUnlock();
internal_allocator_cache_mu.Unlock();
}


@@ -175,12 +175,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}


@@ -238,13 +238,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}


@@ -354,13 +354,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}


@@ -267,9 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.


@@ -238,12 +238,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
static void Lock() ACQUIRE(mutex_);
static void Unlock() RELEASE(mutex_);
static void CheckLocked() CHECK_LOCKED(mutex_);
static void Lock() SANITIZER_ACQUIRE(mutex_);
static void Unlock() SANITIZER_RELEASE(mutex_);
static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
private:
static atomic_uintptr_t reporting_thread_;


@@ -20,25 +20,27 @@
namespace __sanitizer {
class MUTEX StaticSpinMutex {
class SANITIZER_MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
void Lock() ACQUIRE() {
void Lock() SANITIZER_ACQUIRE() {
if (LIKELY(TryLock()))
return;
LockSlow();
}
bool TryLock() TRY_ACQUIRE(true) {
bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
void Unlock() SANITIZER_RELEASE() {
atomic_store(&state_, 0, memory_order_release);
}
void CheckLocked() const CHECK_LOCKED() {
void CheckLocked() const SANITIZER_CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
void LockSlow();
};
class MUTEX SpinMutex : public StaticSpinMutex {
class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
@@ -156,12 +158,12 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
class SANITIZER_MUTEX Mutex : CheckedMutex {
public:
explicit constexpr Mutex(MutexType type = MutexUnchecked)
: CheckedMutex(type) {}
void Lock() ACQUIRE() {
void Lock() SANITIZER_ACQUIRE() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
@@ -206,7 +208,7 @@ class MUTEX Mutex : CheckedMutex {
}
}
void Unlock() RELEASE() {
void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
u64 wake_readers;
@@ -234,7 +236,7 @@ class MUTEX Mutex : CheckedMutex {
readers_.Post(wake_readers);
}
void ReadLock() ACQUIRE_SHARED() {
void ReadLock() SANITIZER_ACQUIRE_SHARED() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
@@ -271,7 +273,7 @@ class MUTEX Mutex : CheckedMutex {
}
}
void ReadUnlock() RELEASE_SHARED() {
void ReadUnlock() SANITIZER_RELEASE_SHARED() {
CheckedMutex::Unlock();
bool wake;
u64 new_state;
@@ -297,13 +299,13 @@ class MUTEX Mutex : CheckedMutex {
// owns the mutex but a child checks that it is locked. Rather than
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned.
void CheckWriteLocked() const CHECK_LOCKED() {
void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
}
void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
void CheckReadLocked() const CHECK_LOCKED() {
void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
}
@@ -361,13 +363,13 @@ void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
class SANITIZER_SCOPED_LOCK GenericScopedLock {
public:
explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
mu_->Lock();
}
~GenericScopedLock() RELEASE() { mu_->Unlock(); }
~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
private:
MutexType *mu_;
@@ -377,13 +379,14 @@ class SCOPED_LOCK GenericScopedLock {
};
template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
public:
explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
: mu_(mu) {
mu_->ReadLock();
}
~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
@@ -393,10 +396,10 @@ class SCOPED_LOCK GenericScopedReadLock {
};
template <typename MutexType>
class SCOPED_LOCK GenericScopedRWLock {
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
public:
ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
ACQUIRE(mu)
SANITIZER_ACQUIRE(mu)
: mu_(mu), write_(write) {
if (write_)
mu_->Lock();
@@ -404,7 +407,7 @@ class SCOPED_LOCK GenericScopedRWLock {
mu_->ReadLock();
}
ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
if (write_)
mu_->Unlock();
else


@@ -170,13 +170,9 @@ typedef struct user_fpregs elf_fpregset_t;
#endif
// Include these after system headers to avoid name clashes and ambiguities.
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
// To prevent macro redefinition warning between our sanitizer_thread_safety.h
// and system's scsi.h.
# undef RELEASE
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform_limits_posix.h"
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);


@@ -149,8 +149,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
RELEASE(recycle_mutex_) {
void NOINLINE Recycle(uptr min_size, Callback cb)
SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);


@@ -97,7 +97,7 @@ class StackStore {
Packed,
Unpacked,
};
State state GUARDED_BY(mtx_);
State state SANITIZER_GUARDED_BY(mtx_);
uptr *Create(StackStore *store);
@@ -109,8 +109,8 @@ class StackStore {
void TestOnlyUnmap(StackStore *store);
bool Stored(uptr n);
bool IsPacked() const;
void Lock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
void Unlock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
};
BlockInfo blocks_[kBlockCount] = {};


@@ -94,8 +94,8 @@ class CompressThread {
constexpr CompressThread() = default;
void NewWorkNotify();
void Stop();
void LockAndStop() NO_THREAD_SAFETY_ANALYSIS;
void Unlock() NO_THREAD_SAFETY_ANALYSIS;
void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
private:
enum class State {
@@ -114,8 +114,8 @@ class CompressThread {
Semaphore semaphore_ = {};
StaticSpinMutex mutex_ = {};
State state_ GUARDED_BY(mutex_) = State::NotStarted;
void *thread_ GUARDED_BY(mutex_) = nullptr;
State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
atomic_uint8_t run_ = {};
};


@@ -86,7 +86,7 @@ class ThreadContextBase {
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
class MUTEX ThreadRegistry {
class SANITIZER_MUTEX ThreadRegistry {
public:
ThreadRegistry(ThreadContextFactory factory);
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -95,9 +95,9 @@ class MUTEX ThreadRegistry {
uptr *alive = nullptr);
uptr GetMaxAliveThreads();
void Lock() ACQUIRE() { mtx_.Lock(); }
void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
void Unlock() RELEASE() { mtx_.Unlock(); }
void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
// Should be guarded by ThreadRegistryLock.
ThreadContextBase *GetThreadLocked(u32 tid) {


@@ -16,27 +16,34 @@
#define SANITIZER_THREAD_SAFETY_H
#if defined(__clang__)
# define THREAD_ANNOTATION(x) __attribute__((x))
# define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
#else
# define THREAD_ANNOTATION(x)
# define SANITIZER_THREAD_ANNOTATION(x)
#endif
#define MUTEX THREAD_ANNOTATION(capability("mutex"))
#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex"))
#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)
#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))
#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))
#define SANITIZER_REQUIRES(...) \
SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
#define SANITIZER_REQUIRES_SHARED(...) \
SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
#define SANITIZER_ACQUIRE(...) \
SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define SANITIZER_ACQUIRE_SHARED(...) \
SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
#define SANITIZER_TRY_ACQUIRE(...) \
SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
#define SANITIZER_RELEASE(...) \
SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))
#define SANITIZER_RELEASE_SHARED(...) \
SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
#define SANITIZER_EXCLUDES(...) \
SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
#define SANITIZER_CHECK_LOCKED(...) \
SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)
#endif
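
For reference, a minimal usage sketch (not part of the patch) of how the renamed annotations are consumed by Clang's -Wthread-safety analysis. ToyMutex, Counter and their members are hypothetical names; the annotation placement mirrors StaticSpinMutex and the SANITIZER_GUARDED_BY members elsewhere in this commit.

#include "sanitizer_thread_safety.h"

// A trivially small capability type, annotated the same way as StaticSpinMutex.
class SANITIZER_MUTEX ToyMutex {
 public:
  void Lock() SANITIZER_ACQUIRE() { /* real locking elided */ }
  void Unlock() SANITIZER_RELEASE() { /* real unlocking elided */ }
};

struct Counter {
  ToyMutex mu;
  int value SANITIZER_GUARDED_BY(mu) = 0;

  void Inc() {
    mu.Lock();
    value++;  // ok: the analysis sees mu held here
    mu.Unlock();
  }

  // Without this attribute Clang would warn that writing 'value' requires
  // holding 'mu'.
  void IncUnchecked() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { value++; }
};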


@@ -299,8 +299,9 @@ struct Allocator {
NOINLINE bool isRssLimitExceeded();
// Allocates a chunk.
void *allocate(uptr Size, uptr Alignment, AllocType Type,
bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
void *
allocate(uptr Size, uptr Alignment, AllocType Type,
bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -404,8 +405,8 @@ struct Allocator {
// Place a chunk in the quarantine or directly deallocate it in the event of
// a zero-sized quarantine, or if the size of the chunk is greater than the
// quarantine chunk size threshold.
void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
uptr Size) NO_THREAD_SAFETY_ANALYSIS {
void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size)
SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
if (BypassQuarantine) {
UnpackedHeader NewHeader = *Header;


@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
void init();
void commitBack();
inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
inline bool tryLock() SANITIZER_TRY_ACQUIRE(true, Mutex) {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
@@ -40,12 +40,12 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
return false;
}
inline void lock() ACQUIRE(Mutex) {
inline void lock() SANITIZER_ACQUIRE(Mutex) {
atomic_store_relaxed(&Precedence, 0);
Mutex.Lock();
}
inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
inline void unlock() SANITIZER_RELEASE(Mutex) { Mutex.Unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }


@@ -34,7 +34,7 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
}
ALWAYS_INLINE ScudoTSD *
getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
FallbackTSD.lock();
*UnlockRequired = true;


@@ -64,7 +64,7 @@ void initThread(bool MinimalInit) {
setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
}
ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) NO_THREAD_SAFETY_ANALYSIS {
ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (NumberOfTSDs > 1) {
// Use the Precedence of the current TSD as our random seed. Since we are in
// the slow path, it means that tryLock failed, and as a result it's very


@@ -124,13 +124,13 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Lock();
global_proc()->internal_alloc_mtx.Lock();
InternalAllocatorLock();
}
void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
InternalAllocatorUnlock();
global_proc()->internal_alloc_mtx.Unlock();
global_proc()->mtx.Unlock();


@@ -521,7 +521,7 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
void ForkBefore(ThreadState *thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
ctx->thread_registry.Lock();
ctx->report_mtx.Lock();
ScopedErrorReportLock::Lock();
@@ -543,7 +543,8 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
__tsan_test_only_on_fork();
}
void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
void ForkParentAfter(ThreadState *thr,
uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;
@@ -554,7 +555,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
}
void ForkChildAfter(ThreadState *thr, uptr pc,
bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
bool start_thread) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;


@@ -124,21 +124,21 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->internal_alloc_mtx.Lock();
InternalAllocatorLock();
}
void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
InternalAllocatorUnlock();
global_proc()->internal_alloc_mtx.Unlock();
}
void GlobalProcessorLock() NO_THREAD_SAFETY_ANALYSIS {
void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Lock();
}
void GlobalProcessorUnlock() NO_THREAD_SAFETY_ANALYSIS {
void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Unlock();
}


@@ -113,7 +113,7 @@ static TracePart* TracePartAlloc(ThreadState* thr) {
return part;
}
static void TracePartFree(TracePart* part) REQUIRES(ctx->slot_mtx) {
static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
DCHECK(part->trace);
part->trace = nullptr;
ctx->trace_part_recycle.PushFront(part);
@@ -208,7 +208,7 @@ static void DoResetImpl(uptr epoch) {
// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
{
for (auto& slot : ctx->slots) {
slot.mtx.Lock();
@@ -230,7 +230,7 @@ void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
void FlushShadowMemory() { DoReset(nullptr, 0); }
static TidSlot* FindSlotAndLock(ThreadState* thr)
ACQUIRE(thr->slot->mtx) NO_THREAD_SAFETY_ANALYSIS {
SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
CHECK(!thr->slot);
TidSlot* slot = nullptr;
for (;;) {
@@ -334,7 +334,7 @@ void SlotDetach(ThreadState* thr) {
SlotDetachImpl(thr, true);
}
void SlotLock(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
// Check these mutexes are not locked.
@@ -756,7 +756,7 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
GlobalProcessorLock();
// Detaching from the slot makes OnUserFree skip writing to the shadow.
// The slot will be locked so any attempts to use it will deadlock anyway.
@@ -783,7 +783,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
__tsan_test_only_on_fork();
}
static void ForkAfter(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;
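
As the comment near DoReset above notes, the analysis cannot model "lock every element of a container", which is why such functions carry SANITIZER_NO_THREAD_SAFETY_ANALYSIS instead of an acquire annotation. A rough, self-contained reproduction of that pattern (ToyMutex, ToySlot and LockAllSlots are made-up names):

#include "sanitizer_thread_safety.h"

struct SANITIZER_MUTEX ToyMutex {
  void Lock() SANITIZER_ACQUIRE() {}
  void Unlock() SANITIZER_RELEASE() {}
};
struct ToySlot {
  ToyMutex mtx;
};
ToySlot slots[4];

// Each iteration acquires a different, data-dependent capability, so the set
// of held locks differs between loop entry and the back edge; without the
// attribute Clang reports an error like the one quoted in the comment above.
void LockAllSlots() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto &slot : slots)
    slot.mtx.Lock();
}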


@@ -332,12 +332,12 @@ struct Context {
Mutex slot_mtx;
uptr global_epoch; // guarded by slot_mtx and by all slot mutexes
bool resetting; // global reset is in progress
IList<TidSlot, &TidSlot::node> slot_queue GUARDED_BY(slot_mtx);
IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
GUARDED_BY(slot_mtx);
uptr trace_part_total_allocated GUARDED_BY(slot_mtx);
uptr trace_part_recycle_finished GUARDED_BY(slot_mtx);
uptr trace_part_finished_excess GUARDED_BY(slot_mtx);
SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
};
extern Context *ctx; // The one and the only global runtime context.
@@ -566,10 +566,10 @@ uptr ALWAYS_INLINE HeapEnd() {
}
#endif
void SlotAttachAndLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) RELEASE(thr->slot->mtx);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();


@@ -156,7 +156,7 @@ ALWAYS_INLINE void StoreShadow(RawShadow* sp, RawShadow s) {
NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
Shadow old,
AccessType typ) NO_THREAD_SAFETY_ANALYSIS {
AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
// For the free shadow markers the first element (that contains kFreeSid)
// triggers the race, but the second element contains info about the freeing
// thread, take it.