sanitizer_common: replace RWMutex/BlockingMutex with Mutex

Mutex supports reader access, OS blocking and spinning, and it is
more portable and smaller than BlockingMutex.
Overall it is meant to be better than both RWMutex and BlockingMutex,
so replace RWMutex/BlockingMutex with Mutex throughout.

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D106936
Dmitry Vyukov, 2021-07-28 09:45:43 +02:00
commit 960cb490dd (parent 48cbcb909d)
7 changed files with 5 additions and 262 deletions
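For orientation (not part of the commit itself): after the migration, a single Mutex serves both the exclusive-only BlockingMutex call sites and the reader/writer RWMutex call sites. A minimal sketch of the intended usage, assuming Mutex exposes the same Lock/Unlock/ReadLock/ReadUnlock surface as the removed RWMutex (the commit message says it supports reader access) and works with the existing scoped-lock helpers; the names of the mutex and functions are invented for illustration.

// Illustrative only; not code from this commit.
Mutex hypothetical_mu;

void HypotheticalWriter() {
  GenericScopedLock<Mutex> lock(&hypothetical_mu);  // Lock()/Unlock(): exclusive
  // ... mutate shared state ...
}

void HypotheticalReader() {
  GenericScopedReadLock<Mutex> lock(&hypothetical_mu);  // ReadLock()/ReadUnlock(): shared
  // ... read shared state ...
}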

compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp

@@ -112,47 +112,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
  CHECK_EQ(status, ZX_OK);
}

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() const {
  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }

compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp

@@ -659,48 +659,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
# endif
}

enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
#if SANITIZER_FREEBSD
    _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
#elif SANITIZER_NETBSD
    sched_yield(); /* No userspace futex-like synchronization */
#else
    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
                     0, 0, 0);
#endif
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
#if SANITIZER_FREEBSD
    _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
#elif SANITIZER_NETBSD
    /* No userspace futex-like synchronization */
#else
    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
#endif
  }
}

void BlockingMutex::CheckLocked() const {
  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
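The Fuchsia and Linux/BSD implementations removed above are the same three-state futex protocol: 0 = unlocked, 1 = locked, 2 = locked with waiters. Below is a compact restatement of that protocol as a sketch, using C++20 std::atomic wait/notify in place of the raw _zx_futex_wait / futex(2) / _umtx_op calls; the class name is invented and the code is illustrative, not code from the tree.

#include <atomic>
#include <cstdint>

// Sketch of the removed BlockingMutex algorithm with portable primitives.
class FutexMutexSketch {
  static constexpr uint32_t kUnlocked = 0, kLocked = 1, kSleeping = 2;
  std::atomic<uint32_t> state_{kUnlocked};

 public:
  void Lock() {
    // Fast path: uncontended acquisition.
    if (state_.exchange(kLocked, std::memory_order_acquire) == kUnlocked)
      return;
    // Slow path: advertise that someone is sleeping, then block until the
    // holder hands the mutex back in the unlocked state.
    while (state_.exchange(kSleeping, std::memory_order_acquire) != kUnlocked)
      state_.wait(kSleeping);  // stand-in for the platform futex-wait call
  }

  void Unlock() {
    // Release the mutex; if a waiter recorded itself, wake exactly one.
    if (state_.exchange(kUnlocked, std::memory_order_release) == kSleeping)
      state_.notify_one();  // stand-in for the platform futex-wake call
  }
};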

compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp

@@ -516,25 +516,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}

BlockingMutex::BlockingMutex() {
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  CHECK_EQ(OS_SPINLOCK_INIT, 0);
  CHECK_EQ(owner_, 0);
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
}

void BlockingMutex::Unlock() {
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}

void BlockingMutex::CheckLocked() const {
  CHECK_NE(*(const OSSpinLock*)&opaque_storage_, 0);
}

u64 NanoTime() {
  timeval tv;
  internal_memset(&tv, 0, sizeof(tv));

compiler-rt/lib/sanitizer_common/sanitizer_mutex.h

@@ -340,111 +340,6 @@ class MUTEX Mutex : CheckedMutex {
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned, and assumes callers to be generally
  // well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:

@@ -477,6 +372,11 @@ class SCOPED_LOCK GenericScopedReadLock {
  void operator=(const GenericScopedReadLock &) = delete;
};

// TODO: Temporary measure for incremental migration.
// These typedefs should be removed and all uses renamed to Mutex.
typedef Mutex BlockingMutex;
typedef Mutex RWMutex;

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
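These aliases mean code still written against the old names keeps compiling while it is migrated; the object behind BlockingMutex, RWMutex, BlockingMutexLock and RWMutexLock is already the new Mutex. A hypothetical call site, unchanged by this commit (the global's name is invented for illustration):

// Illustrative only; not code from the tree.
static RWMutex hypothetical_registry_mtx;  // now an alias of Mutex

void HypotheticalRegistryUpdate() {
  RWMutexLock l(&hypothetical_registry_mtx);  // GenericScopedLock<Mutex> underneath
  // ... exclusive section, exactly as before the migration ...
}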

compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp

@@ -225,28 +225,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
  internal_memset(this, 0, sizeof(*this));
  CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
}

void BlockingMutex::Lock() {
  CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
  CHECK_NE(owner_, (uptr)thr_self());
  CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
  CHECK(!owner_);
  owner_ = (uptr)thr_self();
}

void BlockingMutex::Unlock() {
  CHECK(owner_ == (uptr)thr_self());
  owner_ = 0;
  CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
}

void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }

}  // namespace __sanitizer

#endif // SANITIZER_SOLARIS

compiler-rt/lib/sanitizer_common/sanitizer_win.cpp

@@ -827,27 +827,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
  WakeByAddressAll(p);
}

// ---------------------- BlockingMutex ---------------- {{{1

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
  CHECK_EQ(owner_, 0);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CheckLocked();
  owner_ = 0;
  ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
}

void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }

uptr GetTlsSize() {
  return 0;
}
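One behavioral nuance visible in the removed code: the Windows and Solaris implementations verify in CheckLocked that the calling thread is the owner (owner_ vs. GetThreadSelf()/thr_self()), while the futex-based ones only verify that the mutex is held by somebody, which is the weaker behavior the comment in sanitizer_mutex.h asks for. A hypothetical StopTheWorld-style pattern showing why callers rely on the weaker check (both function names and the helper are invented for illustration):

// Illustrative only; StartTracerThread is an invented helper.
void StartTracerThread();  // assume it spawns a thread running HypotheticalTracer

BlockingMutex hypothetical_stw_mtx;

void HypotheticalParent() {
  hypothetical_stw_mtx.Lock();  // the parent acquires and keeps holding the lock
  StartTracerThread();          // a different thread does the actual work
}

void HypotheticalTracer() {
  // Runs on another thread: it can only assert "this mutex is held",
  // not "this mutex is held by me".
  hypothetical_stw_mtx.CheckLocked();
}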

compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp

@@ -146,18 +146,6 @@ TEST(SanitizerCommon, SpinMutexTry) {
    PTHREAD_JOIN(threads[i], 0);
}

TEST(SanitizerCommon, BlockingMutex) {
  u64 mtxmem[1024] = {};
  BlockingMutex *mtx = new(mtxmem) BlockingMutex(LINKER_INITIALIZED);
  TestData<BlockingMutex> data(mtx);
  pthread_t threads[kThreads];
  for (int i = 0; i < kThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, lock_thread<BlockingMutex>, &data);
  for (int i = 0; i < kThreads; i++)
    PTHREAD_JOIN(threads[i], 0);
  check_locked(mtx);
}

TEST(SanitizerCommon, Mutex) {
  Mutex mtx;
  TestData<Mutex> data(&mtx);