tsan: lock ScopedErrorReportLock around fork

Currently we don't lock ScopedErrorReportLock around fork,
and it mostly works because tsan has its own report_mtx that
is locked around fork and around tsan reports.
However, sanitizer_common code prints some reports of its own
which are not protected by tsan's report_mtx, so it's better
to lock ScopedErrorReportLock explicitly.
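For background, the hazard is the usual one with locks and fork(): if any thread other than the forking one holds the report lock at the moment of fork(), the child inherits a locked mutex that no thread in the child will ever release. Below is a minimal standalone sketch of the pre-fork/post-fork locking pattern this change applies to ScopedErrorReportLock; it is not sanitizer code, and all names in it (g_report_mtx, BeforeFork, ...) are invented for illustration.

  #include <pthread.h>
  #include <unistd.h>

  // Invented stand-in for the sanitizer's error-report lock.
  static pthread_mutex_t g_report_mtx = PTHREAD_MUTEX_INITIALIZER;

  // Pre-fork hook: take the lock so fork() cannot happen while another
  // thread is in the middle of printing a report.
  static void BeforeFork() { pthread_mutex_lock(&g_report_mtx); }
  // Post-fork hooks: parent and child each release the lock they now own.
  static void AfterForkParent() { pthread_mutex_unlock(&g_report_mtx); }
  static void AfterForkChild() { pthread_mutex_unlock(&g_report_mtx); }

  int main() {
    pthread_atfork(BeforeFork, AfterForkParent, AfterForkChild);
    if (fork() == 0) {
      // Safe in the child: AfterForkChild released the lock, so anything
      // that needs to report (and therefore take it) cannot deadlock.
      pthread_mutex_lock(&g_report_mtx);
      pthread_mutex_unlock(&g_report_mtx);
      _exit(0);
    }
    return 0;
  }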

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D106048
Dmitry Vyukov 2021-07-15 11:18:53 +02:00
parent b980d2f54b
commit c3c324dddf
6 changed files with 26 additions and 19 deletions

compiler-rt/lib/sanitizer_common/sanitizer_common.h

@@ -237,10 +237,16 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
 // Lock sanitizer error reporting and protects against nested errors.
 class ScopedErrorReportLock {
  public:
-  ScopedErrorReportLock();
-  ~ScopedErrorReportLock();
+  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
+  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
 
-  static void CheckLocked();
+  static void Lock() ACQUIRE(mutex_);
+  static void Unlock() RELEASE(mutex_);
+  static void CheckLocked() CHECK_LOCKED(mutex_);
+
+ private:
+  static atomic_uintptr_t reporting_thread_;
+  static StaticSpinMutex mutex_;
 };
 
 extern uptr stoptheworld_tracer_pid;
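The shape of this interface change, as a rough standalone sketch (invented names, std::mutex in place of the sanitizer's StaticSpinMutex): keep the RAII type for the normal scoped reporting path, but expose static Lock()/Unlock() on the same underlying mutex so code that must hold the lock across a non-lexical region, such as the fork hooks later in this commit, can use it too.

  #include <mutex>

  class ScopedReport {
   public:
    ScopedReport() { Lock(); }     // scoped use, as before
    ~ScopedReport() { Unlock(); }

    static void Lock() { mutex_.lock(); }      // non-scoped use (fork hooks)
    static void Unlock() { mutex_.unlock(); }

   private:
    static std::mutex mutex_;
  };

  std::mutex ScopedReport::mutex_;

  void ReportSomething() {
    ScopedReport lock;  // released automatically at end of scope
    // ... print the report ...
  }

  void PreForkHook() { ScopedReport::Lock(); }     // held across fork()
  void PostForkHook() { ScopedReport::Unlock(); }  // parent and child both release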

compiler-rt/lib/sanitizer_common/sanitizer_mutex.h

@@ -38,7 +38,7 @@ class MUTEX StaticSpinMutex {
   void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
 
-  void CheckLocked() const CHECK_LOCKED {
+  void CheckLocked() const CHECK_LOCKED() {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
   }
@@ -84,7 +84,7 @@ class MUTEX BlockingMutex {
   // maintaining complex state to work around those situations, the check only
   // checks that the mutex is owned, and assumes callers to be generally
   // well-behaved.
-  void CheckLocked() const CHECK_LOCKED;
+  void CheckLocked() const CHECK_LOCKED();
 
  private:
   // Solaris mutex_t has a member that requires 64-bit alignment.
@@ -131,7 +131,7 @@ class MUTEX RWMutex {
     (void)prev;
   }
 
-  void CheckLocked() const CHECK_LOCKED {
+  void CheckLocked() const CHECK_LOCKED() {
     CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }

compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp

@@ -250,17 +250,17 @@ void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
 #endif  // !SANITIZER_FUCHSIA && !SANITIZER_GO
 
-static atomic_uintptr_t reporting_thread = {0};
-static StaticSpinMutex CommonSanitizerReportMutex;
+atomic_uintptr_t ScopedErrorReportLock::reporting_thread_ = {0};
+StaticSpinMutex ScopedErrorReportLock::mutex_;
 
-ScopedErrorReportLock::ScopedErrorReportLock() {
+void ScopedErrorReportLock::Lock() {
   uptr current = GetThreadSelf();
   for (;;) {
     uptr expected = 0;
-    if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
+    if (atomic_compare_exchange_strong(&reporting_thread_, &expected, current,
                                        memory_order_relaxed)) {
       // We've claimed reporting_thread so proceed.
-      CommonSanitizerReportMutex.Lock();
+      mutex_.Lock();
       return;
     }
@@ -282,13 +282,11 @@ ScopedErrorReportLock::ScopedErrorReportLock() {
   }
 }
 
-ScopedErrorReportLock::~ScopedErrorReportLock() {
-  CommonSanitizerReportMutex.Unlock();
-  atomic_store_relaxed(&reporting_thread, 0);
+void ScopedErrorReportLock::Unlock() {
+  mutex_.Unlock();
+  atomic_store_relaxed(&reporting_thread_, 0);
 }
 
-void ScopedErrorReportLock::CheckLocked() {
-  CommonSanitizerReportMutex.CheckLocked();
-}
+void ScopedErrorReportLock::CheckLocked() { mutex_.CheckLocked(); }
 
 }  // namespace __sanitizer

compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h

@@ -95,7 +95,7 @@ class MUTEX ThreadRegistry {
   uptr GetMaxAliveThreads();
 
   void Lock() ACQUIRE() { mtx_.Lock(); }
-  void CheckLocked() const CHECK_LOCKED { mtx_.CheckLocked(); }
+  void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
   void Unlock() RELEASE() { mtx_.Unlock(); }
 
   // Should be guarded by ThreadRegistryLock.

compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h

@@ -36,7 +36,7 @@
 #define RELEASE_SHARED(...) \
   THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
 #define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
-#define CHECK_LOCKED THREAD_ANNOTATION(assert_capability(this))
+#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
 #define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
 #endif
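Making CHECK_LOCKED variadic lets the asserted capability be named explicitly (as in the CheckLocked() CHECK_LOCKED(mutex_) declaration above) instead of always being `this`. A rough sketch of what this buys under Clang's -Wthread-safety analysis is shown below; the names are invented, and the raw attributes are spelled out directly rather than going through THREAD_ANNOTATION.

  #if defined(__clang__)
  #define CAP(x)          __attribute__((capability(x)))
  #define ACQ(...)        __attribute__((acquire_capability(__VA_ARGS__)))
  #define REL(...)        __attribute__((release_capability(__VA_ARGS__)))
  #define ASSERT_CAP(...) __attribute__((assert_capability(__VA_ARGS__)))
  #else
  #define CAP(x)
  #define ACQ(...)
  #define REL(...)
  #define ASSERT_CAP(...)
  #endif

  struct CAP("mutex") DemoMutex {
    void Lock() ACQ() {}
    void Unlock() REL() {}
    // Old, non-variadic form could only assert `this`.
    void CheckLocked() const ASSERT_CAP(this) {}
  };

  struct Reporter {
    // New form: a static method can assert a capability other than `this`,
    // which is what ScopedErrorReportLock::CheckLocked() now relies on.
    static void CheckLocked() ASSERT_CAP(mutex_) {}
    static DemoMutex mutex_;
  };

  DemoMutex Reporter::mutex_;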

compiler-rt/lib/tsan/rtl/tsan_rtl.cpp

@@ -534,6 +534,7 @@ int Finalize(ThreadState *thr) {
 void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry->Lock();
   ctx->report_mtx.Lock();
+  ScopedErrorReportLock::Lock();
   // Suppress all reports in the pthread_atfork callbacks.
   // Reports will deadlock on the report_mtx.
   // We could ignore sync operations as well,
@@ -548,6 +549,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
 void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
+  ScopedErrorReportLock::Unlock();
   ctx->report_mtx.Unlock();
   ctx->thread_registry->Unlock();
 }
@@ -555,6 +557,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
 void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
+  ScopedErrorReportLock::Unlock();
   ctx->report_mtx.Unlock();
   ctx->thread_registry->Unlock();