//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {
class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0 &&
          atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};
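// Usage sketch (illustrative; `module_mu` is a hypothetical name). A
// StaticSpinMutex has no constructor: a static/global instance is
// zero-initialized, which is the unlocked state, and Init() re-establishes
// that state explicitly. SpinMutex adds a constructor that calls Init() for
// non-static instances.
//
//   static StaticSpinMutex module_mu;
//   ...
//   module_mu.Lock();
//   // short critical section; this is a spin lock, so keep it brief
//   module_mu.Unlock();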
// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread to return from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
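// Usage sketch (illustrative; `work_available` is a hypothetical name). Post
// lets up to `count` calls to Wait return, so a producer can hand work to a
// parked consumer:
//
//   Semaphore work_available;
//   // producer: push work, then work_available.Post();
//   // consumer: work_available.Wait(), then pop the work item.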
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
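// Assumed contract for the two primitives above (the usual futex semantics;
// not spelled out in this header): FutexWait blocks the calling thread while
// *p == cmp, and FutexWake wakes up to `count` threads blocked on p.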
class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not check that the calling thread is the thread which
  // owns the mutex. That stricter check would be more correct, but it causes
  // problems in cases like StopTheWorld, where a parent thread owns the mutex
  // while a child thread checks that it is locked. Rather than maintaining
  // complex state to work around such situations, the check only verifies
  // that the mutex is owned by somebody and assumes callers are generally
  // well-behaved.
  void CheckLocked() const CHECK_LOCKED();
 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
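// Usage sketch (illustrative; `report_mu` is a hypothetical name). The
// constexpr LinkerInitialized constructor lets a global BlockingMutex be
// constant-initialized and therefore usable before any dynamic initializers
// run:
//
//   static BlockingMutex report_mu(LINKER_INITIALIZED);
//   ...
//   BlockingMutexLock lock(&report_mu);   // see the typedefs below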
// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};
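// Usage sketch (illustrative; `registry_mu` is a hypothetical name). Writers
// take the mutex exclusively with Lock/Unlock; readers share it with
// ReadLock/ReadUnlock, each reader adding kReadLock to state_ so any number
// of readers can hold the mutex as long as no writer does.
//
//   RWMutex registry_mu;               // e.g. a member of a registry object
//   // writer: registry_mu.Lock();     ... registry_mu.Unlock();
//   // reader: registry_mu.ReadLock(); ... registry_mu.ReadUnlock();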
template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
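// Usage sketch (illustrative; `Stats` and its `mu` member are hypothetical).
// Prefer the RAII aliases above to manual Lock/Unlock pairs: the guard's
// destructor releases the mutex on every path out of the scope, including
// early returns.
//
//   void UpdateStats(Stats *s) {
//     SpinMutexLock l(&s->mu);   // s->mu: a StaticSpinMutex (hypothetical)
//     s->hits++;
//   }                            // s->mu is released here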
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H