//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};
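
// Usage sketch (illustrative only; the names below are hypothetical): a
// global StaticSpinMutex lives in zero-initialized storage, and the all-zero
// state is already "unlocked", so no constructor has to run before first use.
// SpinMutexLock is the RAII wrapper typedef'ed at the end of this file.
//
//   static StaticSpinMutex g_counter_mu;
//   static u64 g_counter;
//
//   void IncrementCounter() {
//     SpinMutexLock l(&g_counter_mu);  // Lock() in ctor, Unlock() in dtor.
//     g_counter++;
//   }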

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
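
// Usage sketch (illustrative; not part of the interface): one thread parks on
// the semaphore until another thread publishes a result and posts it.
//
//   Semaphore done;
//   // Worker thread:   ProduceResult(); done.Post();
//   // Waiting thread:  done.Wait();  // returns once a Post() has arrived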

// Reader-writer mutex.
class MUTEX Mutex2 {
 public:
  constexpr Mutex2() {}

  void Lock() ACQUIRE() {
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    const uptr kMaxSpinIters = 1500;
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
        state = atomic_load(&state_, memory_order_relaxed);
        DCHECK_NE(state & kWriterSpinWait, 0);
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked, or we just spun but set
      // kWriterSpinWait. Either way we need to reset kWriterSpinWait the
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
    }
  }

  void Unlock() RELEASE() {
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer =
          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) +
                    (wake_readers << kReaderLockShift);
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    bool locked;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      locked =
          (state & kReaderLockMask) == 0 &&
          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
      if (LIKELY(!locked))
        new_state = state + kReaderLockInc;
      else
        new_state = state + kWaitingReaderInc;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_acquire)));
    if (UNLIKELY(locked))
      readers_.Wait();
    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
  }

  void ReadUnlock() RELEASE_SHARED() {
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
      new_state = state - kReaderLockInc;
      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not check that the calling thread is the thread which
  // owns the mutex. Checking ownership, while more strictly correct, causes
  // problems in cases like StopTheWorld, where a parent thread owns the mutex
  // but a child checks that it is locked. Rather than maintaining complex
  // state to work around those situations, the check only verifies that the
  // mutex is held by some thread.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 2 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //
  // Writers support active spinning, readers do not.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // Writers hand off the mutex to readers: after wakeup, readers
  // already own the mutex (they don't need to do any state updates).
  // But the mutex is not handed off to writers: after wakeup, writers
  // compete to lock the mutex again. This is needed to allow repeated
  // write locks even in the presence of other blocked writers.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
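
  // With kCounterWidth == 20, the constants above give the following 64-bit
  // layout of state_ (a summary of the definitions, for reference):
  //   bits  0..19  readers holding the lock   (kReaderLockMask)
  //   bits 20..39  waiting readers            (kWaitingReaderMask)
  //   bits 40..59  waiting writers            (kWaitingWriterMask)
  //   bit  60      write lock                 (kWriterLock)
  //   bit  61      a writer is spin-waiting   (kWriterSpinWait)
  //   bits 62..63  unused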

  Mutex2(const Mutex2 &) = delete;
  void operator=(const Mutex2 &) = delete;
};
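
// Usage sketch (illustrative; the names are hypothetical): exclusive and
// shared critical sections with explicit lock/unlock calls. A scoped wrapper
// such as GenericScopedLock<Mutex2> (defined at the end of this file) is
// usually the safer choice.
//
//   Mutex2 registry_mu;
//
//   void UpdateRegistry() {
//     registry_mu.Lock();        // exclusive (write) lock
//     // ... mutate shared data ...
//     registry_mu.Unlock();
//   }
//
//   void QueryRegistry() {
//     registry_mu.ReadLock();    // shared (read) lock
//     // ... read shared data ...
//     registry_mu.ReadUnlock();
//   }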

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_{0}, owner_{0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not check that the calling thread is the thread which
  // owns the mutex. Checking ownership, while more strictly correct, causes
  // problems in cases like StopTheWorld, where a parent thread owns the mutex
  // but a child checks that it is locked. Rather than maintaining complex
  // state to work around those situations, the check only verifies that the
  // mutex is held by some thread, and assumes callers to be generally
  // well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
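
// Usage sketch (illustrative; the names are hypothetical): a global
// BlockingMutex constructed with LINKER_INITIALIZED is constant-initialized,
// so it does not rely on a runtime constructor. BlockingMutexLock is the RAII
// wrapper typedef'ed at the end of this file.
//
//   static BlockingMutex g_report_mu(LINKER_INITIALIZED);
//
//   void ReportSomething() {
//     BlockingMutexLock l(&g_report_mu);
//     // ... serialized reporting ...
//   }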

// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
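
// Usage sketch (illustrative; the names are hypothetical): the scoped
// typedefs above provide RAII locking over the mutex types defined in this
// file.
//
//   static RWMutex g_modules_mu;
//
//   void RegisterModule() {
//     RWMutexLock l(&g_modules_mu);      // exclusive lock for writers
//     // ... add to the module list ...
//   }
//
//   void LookupModule() {
//     RWMutexReadLock l(&g_modules_mu);  // shared lock for readers
//     // ... search the module list ...
//   }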

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H