forked from OSchip/llvm-project

asan/tsan: move blocking mutex from asan to sanitizer_common

llvm-svn: 172380

parent 0d2c29e807
commit f22982bf0a
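On the ASan side the change is mechanical: `AsanLock` becomes `BlockingMutex`, `ScopedLock` becomes `BlockingMutexLock`, and asan_lock.h is deleted in favor of sanitizer_common/sanitizer_mutex.h so TSan can share the implementation. A minimal sketch of the resulting usage pattern (the `g_mu` and `DoWork` names are illustrative, not part of the commit):

#include "sanitizer_common/sanitizer_mutex.h"

namespace __asan {

// Global locks are linker-initialized and never destroyed, so they stay
// usable even while atexit handlers run on the main thread.
static __sanitizer::BlockingMutex g_mu(__sanitizer::LINKER_INITIALIZED);

void DoWork() {
  // RAII guard: Lock() in the constructor, Unlock() in the destructor.
  __sanitizer::BlockingMutexLock lock(&g_mu);
  // ... critical section ...
}

}  // namespace __asan
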
@@ -29,7 +29,6 @@
 #if ASAN_ALLOCATOR_VERSION == 1
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_mapping.h"
 #include "asan_stats.h"
 #include "asan_report.h"
@@ -37,6 +36,7 @@
 #include "asan_thread_registry.h"
 #include "sanitizer/asan_interface.h"
 #include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"

 namespace __asan {

@@ -229,7 +229,7 @@ class MallocInfo {
     AsanChunk *m = 0;
     AsanChunk **fl = &free_lists_[size_class];
     {
-      ScopedLock lock(&mu_);
+      BlockingMutexLock lock(&mu_);
       for (uptr i = 0; i < n_chunks; i++) {
         if (!(*fl)) {
           *fl = GetNewChunks(size_class);
@@ -247,7 +247,7 @@ class MallocInfo {
   void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                        bool eat_free_lists) {
     CHECK(flags()->quarantine_size > 0);
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     AsanChunkFifoList *q = &x->quarantine_;
     if (q->size() > 0) {
       quarantine_.PushList(q);
@@ -271,18 +271,18 @@ class MallocInfo {
   }

   void BypassThreadLocalQuarantine(AsanChunk *chunk) {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     quarantine_.Push(chunk);
   }

   AsanChunk *FindChunkByAddr(uptr addr) {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     return FindChunkByAddrUnlocked(addr);
   }

   uptr AllocationSize(uptr ptr) {
     if (!ptr) return 0;
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);

     // Make sure this is our chunk and |ptr| actually points to the beginning
     // of the allocated memory.
@@ -305,7 +305,7 @@ class MallocInfo {
   }

   void PrintStatus() {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     uptr malloced = 0;

     Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
@@ -323,7 +323,7 @@ class MallocInfo {
   }

   PageGroup *FindPageGroup(uptr addr) {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     return FindPageGroupUnlocked(addr);
   }

@@ -481,7 +481,7 @@ class MallocInfo {

   AsanChunk *free_lists_[kNumberOfSizeClasses];
   AsanChunkFifoList quarantine_;
-  AsanLock mu_;
+  BlockingMutex mu_;

   PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
   atomic_uint32_t n_page_groups_;

@@ -13,13 +13,13 @@
 //===----------------------------------------------------------------------===//
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_mapping.h"
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "asan_stats.h"
 #include "asan_thread.h"
 #include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_mutex.h"

 namespace __asan {

@@ -30,7 +30,7 @@ struct ListOfGlobals {
   ListOfGlobals *next;
 };

-static AsanLock mu_for_globals(LINKER_INITIALIZED);
+static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
 static LowLevelAllocator allocator_for_globals;
 static ListOfGlobals *list_of_all_globals;
 static ListOfGlobals *list_of_dynamic_init_globals;
@@ -62,7 +62,7 @@ static uptr GetAlignedSize(uptr size) {

 bool DescribeAddressIfGlobal(uptr addr) {
   if (!flags()->report_globals) return false;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   bool res = false;
   for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
     const Global &g = *l->g;
@@ -146,7 +146,7 @@ using namespace __asan;  // NOLINT
 void __asan_register_global(uptr addr, uptr size,
                             const char *name) {
   if (!flags()->report_globals) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   Global *g = (Global *)allocator_for_globals.Allocate(sizeof(Global));
   g->beg = addr;
   g->size = size;
@@ -158,7 +158,7 @@ void __asan_register_global(uptr addr, uptr size,
 // Register an array of globals.
 void __asan_register_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   for (uptr i = 0; i < n; i++) {
     RegisterGlobal(&globals[i]);
   }
@@ -168,7 +168,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
 // We must do this when a shared objects gets dlclosed.
 void __asan_unregister_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   for (uptr i = 0; i < n; i++) {
     UnregisterGlobal(&globals[i]);
   }
@@ -181,7 +181,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
 void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
   if (!flags()->check_initialization_order) return;
   CHECK(list_of_dynamic_init_globals);
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   bool from_current_tu = false;
   // The list looks like:
   // a => ... => b => last_addr => ... => first_addr => c => ...
@@ -202,7 +202,7 @@ void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
 // TU are poisoned. It simply unpoisons all dynamically initialized globals.
 void __asan_after_dynamic_init() {
   if (!flags()->check_initialization_order) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next)
     UnpoisonGlobal(l->g);
 }

@@ -15,7 +15,6 @@

 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_thread.h"
 #include "asan_thread_registry.h"
 #include "sanitizer_common/sanitizer_libc.h"
@@ -102,26 +101,6 @@ void AsanPlatformThreadInit() {
   // Nothing here for now.
 }

-AsanLock::AsanLock(LinkerInitialized) {
-  // We assume that pthread_mutex_t initialized to all zeroes is a valid
-  // unlocked mutex. We can not use PTHREAD_MUTEX_INITIALIZER as it triggers
-  // a gcc warning:
-  // extended initializer lists only available with -std=c++0x or -std=gnu++0x
-}
-
-void AsanLock::Lock() {
-  CHECK(sizeof(pthread_mutex_t) <= sizeof(opaque_storage_));
-  pthread_mutex_lock((pthread_mutex_t*)&opaque_storage_);
-  CHECK(!owner_);
-  owner_ = (uptr)pthread_self();
-}
-
-void AsanLock::Unlock() {
-  CHECK(owner_ == (uptr)pthread_self());
-  owner_ = 0;
-  pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
-}
-
 void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
 #if defined(__arm__) || \
     defined(__powerpc__) || defined(__powerpc64__) || \

@@ -1,42 +0,0 @@
-//===-- asan_lock.h ---------------------------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// A wrapper for a simple lock.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_LOCK_H
-#define ASAN_LOCK_H
-
-#include "sanitizer_common/sanitizer_mutex.h"
-#include "asan_internal.h"
-
-// The locks in ASan are global objects and they are never destroyed to avoid
-// at-exit races (that is, a lock is being used by other threads while the main
-// thread is doing atexit destructors).
-// We define the class using opaque storage to avoid including system headers.
-
-namespace __asan {
-
-class AsanLock {
- public:
-  explicit AsanLock(LinkerInitialized);
-  void Lock();
-  void Unlock();
-  bool IsLocked() { return owner_ != 0; }
- private:
-  uptr opaque_storage_[10];
-  uptr owner_;  // for debugging and for malloc_introspection_t interface
-};
-
-typedef GenericScopedLock<AsanLock> ScopedLock;
-
-}  // namespace __asan
-
-#endif  // ASAN_LOCK_H

@@ -141,25 +141,6 @@ void AsanPlatformThreadInit() {
   }
 }

-AsanLock::AsanLock(LinkerInitialized) {
-  // We assume that OS_SPINLOCK_INIT is zero
-}
-
-void AsanLock::Lock() {
-  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
-  CHECK(OS_SPINLOCK_INIT == 0);
-  CHECK(owner_ != (uptr)pthread_self());
-  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
-  CHECK(!owner_);
-  owner_ = (uptr)pthread_self();
-}
-
-void AsanLock::Unlock() {
-  CHECK(owner_ == (uptr)pthread_self());
-  owner_ = 0;
-  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
-}
-
 void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
   (void)fast;
   stack->size = 0;

@@ -14,7 +14,6 @@
 #include "asan_allocator.h"
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_mapping.h"
 #include "asan_report.h"
 #include "asan_stack.h"

@@ -13,7 +13,6 @@
 //===----------------------------------------------------------------------===//
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_stats.h"
 #include "asan_thread_registry.h"
 #include "sanitizer/asan_interface.h"
@@ -55,13 +54,13 @@ void AsanStats::Print() {
          malloc_large, malloc_small_slow);
 }

-static AsanLock print_lock(LINKER_INITIALIZED);
+static BlockingMutex print_lock(LINKER_INITIALIZED);

 static void PrintAccumulatedStats() {
   AsanStats stats;
   asanThreadRegistry().GetAccumulatedStats(&stats);
   // Use lock to keep reports from mixing up.
-  ScopedLock lock(&print_lock);
+  BlockingMutexLock lock(&print_lock);
   stats.Print();
   StackDepotStats *stack_depot_stats = StackDepotGetStats();
   Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",

@@ -44,7 +44,7 @@ void AsanThreadRegistry::Init() {
 }

 void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   u32 tid = n_threads_;
   n_threads_++;
   CHECK(n_threads_ < kMaxNumberOfThreads);
@@ -56,7 +56,7 @@ void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
 }

 void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   FlushToAccumulatedStatsUnlocked(&thread->stats());
   AsanThreadSummary *summary = thread->summary();
   CHECK(summary);
@@ -105,13 +105,13 @@ AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
 }

 void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
   internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_));
 }

 uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
   uptr malloced = accumulated_stats_.malloced;
   uptr freed = accumulated_stats_.freed;
@@ -121,13 +121,13 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
 }

 uptr AsanThreadRegistry::GetHeapSize() {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
   return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
 }

 uptr AsanThreadRegistry::GetFreeBytes() {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
   uptr total_free = accumulated_stats_.mmaped
                     - accumulated_stats_.munmaped
@@ -143,7 +143,7 @@ uptr AsanThreadRegistry::GetFreeBytes() {
 // Return several stats counters with a single call to
 // UpdateAccumulatedStatsUnlocked().
 void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
   malloc_stats->blocks_in_use = accumulated_stats_.mallocs;
   malloc_stats->size_in_use = accumulated_stats_.malloced;
@@ -158,7 +158,7 @@ AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
 }

 AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
-  ScopedLock lock(&mu_);
+  BlockingMutexLock lock(&mu_);
   for (u32 tid = 0; tid < n_threads_; tid++) {
     AsanThread *t = thread_summaries_[tid]->thread();
     if (!t || !(t->fake_stack().StackSize())) continue;

@@ -15,10 +15,10 @@
 #ifndef ASAN_THREAD_REGISTRY_H
 #define ASAN_THREAD_REGISTRY_H

-#include "asan_lock.h"
 #include "asan_stack.h"
 #include "asan_stats.h"
 #include "asan_thread.h"
+#include "sanitizer_common/sanitizer_mutex.h"

 namespace __asan {

@@ -73,7 +73,7 @@ class AsanThreadRegistry {
   // per-thread AsanStats.
   uptr max_malloced_memory_;
   u32 n_threads_;
-  AsanLock mu_;
+  BlockingMutex mu_;
   bool inited_;
 };

@@ -19,15 +19,14 @@

 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_thread.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_mutex.h"

 namespace __asan {

 // ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
-static AsanLock dbghelp_lock(LINKER_INITIALIZED);
+static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
 static bool dbghelp_initialized = false;
 #pragma comment(lib, "dbghelp.lib")

@@ -55,42 +54,6 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
     stack->trace[i] = (uptr)tmp[i + offset];
 }

-// ---------------------- AsanLock ---------------- {{{1
-enum LockState {
-  LOCK_UNINITIALIZED = 0,
-  LOCK_READY = -1,
-};
-
-AsanLock::AsanLock(LinkerInitialized li) {
-  // FIXME: see comments in AsanLock::Lock() for the details.
-  CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
-
-  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
-  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
-  owner_ = LOCK_READY;
-}
-
-void AsanLock::Lock() {
-  if (owner_ == LOCK_UNINITIALIZED) {
-    // FIXME: hm, global AsanLock objects are not initialized?!?
-    // This might be a side effect of the clang+cl+link Frankenbuild...
-    new(this) AsanLock((LinkerInitialized)(LINKER_INITIALIZED + 1));
-
-    // FIXME: If it turns out the linker doesn't invoke our
-    // constructors, we should probably manually Lock/Unlock all the global
-    // locks while we're starting in one thread to avoid double-init races.
-  }
-  EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
-  CHECK(owner_ == LOCK_READY);
-  owner_ = GetThreadSelf();
-}
-
-void AsanLock::Unlock() {
-  CHECK(owner_ == GetThreadSelf());
-  owner_ = LOCK_READY;
-  LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
-}
-
 // ---------------------- TSD ---------------- {{{1
 static bool tsd_key_inited = false;

@@ -151,7 +114,7 @@ using namespace __asan;  // NOLINT
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
 bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
-  ScopedLock lock(&dbghelp_lock);
+  BlockingMutexLock lock(&dbghelp_lock);
   if (!dbghelp_initialized) {
     SymSetOptions(SYMOPT_DEFERRED_LOADS |
                   SYMOPT_UNDNAME |

@@ -234,7 +234,7 @@ class SizeClassAllocator64 {
   Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
     RegionInfo *region = GetRegionInfo(class_id);
-    SpinMutexLock l(&region->mutex);
+    BlockingMutexLock l(&region->mutex);
     if (region->free_list.empty())
       PopulateFreeList(c, class_id, region);
     CHECK(!region->free_list.empty());
@@ -246,7 +246,7 @@ class SizeClassAllocator64 {

   void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
     RegionInfo *region = GetRegionInfo(class_id);
-    SpinMutexLock l(&region->mutex);
+    BlockingMutexLock l(&region->mutex);
     region->free_list.push_front(b);
     region->n_freed++;
   }
@@ -343,7 +343,7 @@ class SizeClassAllocator64 {
   static const uptr kMetaMapSize = 1 << 16;

   struct RegionInfo {
-    SpinMutex mutex;
+    BlockingMutex mutex;
     IntrusiveList<Batch> free_list;
     uptr allocated_user;  // Bytes allocated for user memory.
     uptr allocated_meta;  // Bytes allocated for metadata.

@@ -34,6 +34,7 @@
 #include <unwind.h>
 #include <errno.h>
 #include <sys/prctl.h>
+#include <linux/futex.h>

 // Are we using 32-bit or 64-bit syscalls?
 // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
@@ -436,6 +437,34 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {

 #endif  // #ifndef SANITIZER_GO

+enum MutexState {
+  MtxUnlocked = 0,
+  MtxLocked = 1,
+  MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+}
+
+void BlockingMutex::Lock() {
+  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+    return;
+  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
+    syscall(__NR_futex, m, FUTEX_WAIT_PRIVATE, MtxSleeping, 0, 0, 0);
+}
+
+void BlockingMutex::Unlock() {
+  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+  if (v == MtxUnlocked) {
+    Printf("FATAL: unlock of unlocked mutex\n");
+    Die();
+  }
+  if (v == MtxSleeping)
+    syscall(__NR_futex, m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
+}
+
 }  // namespace __sanitizer

 #endif  // __linux__

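The Linux code above is a classic three-state futex mutex: Lock() first tries a fast-path exchange to MtxLocked; on contention it re-exchanges to MtxSleeping before calling FUTEX_WAIT, so the eventual unlocker knows a wake-up is needed; Unlock() exchanges back to MtxUnlocked and issues FUTEX_WAKE only if the observed state was MtxSleeping, keeping the uncontended path entirely in user space. A self-contained sketch of the same protocol using C++11 atomics (illustrative only; the commit uses the sanitizer atomics and a raw __NR_futex syscall, and this sketch uses release ordering on unlock where the commit uses relaxed):

#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// Three mutex states, mirroring the commit's MtxUnlocked/MtxLocked/MtxSleeping.
enum State : unsigned { Unlocked = 0, Locked = 1, Sleeping = 2 };

// Thin wrapper over the futex syscall (glibc provides no futex() wrapper).
static long do_futex(std::atomic<unsigned> *addr, int op, unsigned val) {
  return syscall(SYS_futex, reinterpret_cast<unsigned *>(addr), op, val,
                 nullptr, nullptr, 0);
}

class FutexMutex {
 public:
  void Lock() {
    // Fast path: uncontended acquire (Unlocked -> Locked).
    if (state_.exchange(Locked, std::memory_order_acquire) == Unlocked)
      return;
    // Slow path: mark the mutex Sleeping so the unlocker knows to wake us,
    // then block in the kernel until the state leaves Sleeping.
    while (state_.exchange(Sleeping, std::memory_order_acquire) != Unlocked)
      do_futex(&state_, FUTEX_WAIT_PRIVATE, Sleeping);
  }

  void Unlock() {
    // Wake one waiter only if somebody advertised that it went to sleep;
    // an uncontended unlock never enters the kernel.
    if (state_.exchange(Unlocked, std::memory_order_release) == Sleeping)
      do_futex(&state_, FUTEX_WAKE_PRIVATE, 1);
  }

 private:
  std::atomic<unsigned> state_{Unlocked};
};
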
@@ -267,6 +267,25 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
   return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
 }

+BlockingMutex::BlockingMutex(LinkerInitialized) {
+  // We assume that OS_SPINLOCK_INIT is zero
+}
+
+void BlockingMutex::Lock() {
+  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+  CHECK(OS_SPINLOCK_INIT == 0);
+  CHECK(owner_ != (uptr)pthread_self());
+  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+  CHECK(!owner_);
+  owner_ = (uptr)pthread_self();
+}
+
+void BlockingMutex::Unlock() {
+  CHECK(owner_ == (uptr)pthread_self());
+  owner_ = 0;
+  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
+}
+
 }  // namespace __sanitizer

 #endif  // __APPLE__

@@ -67,6 +67,15 @@ class SpinMutex : public StaticSpinMutex {
   void operator=(const SpinMutex&);
 };

+class BlockingMutex {
+ public:
+  explicit BlockingMutex(LinkerInitialized);
+  void Lock();
+  void Unlock();
+ private:
+  uptr opaque_storage_[10];
+};
+
 template<typename MutexType>
 class GenericScopedLock {
  public:
@@ -106,6 +115,7 @@ class GenericScopedReadLock {
 };

 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;

 }  // namespace __sanitizer

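The new `BlockingMutexLock` typedef is just `GenericScopedLock` instantiated for `BlockingMutex`: the guard calls `mu->Lock()` in its constructor and `mu->Unlock()` in its destructor, so any type exposing that pair of methods works. A hypothetical example of guarding shared state with it (the `counter` names are illustrative, not from the commit):

#include "sanitizer_common/sanitizer_mutex.h"

namespace __sanitizer {

static BlockingMutex counter_mu(LINKER_INITIALIZED);
static uptr counter;

uptr IncrementCounter() {
  // The guard releases the mutex on every path out of this scope,
  // including early returns.
  BlockingMutexLock lock(&counter_mu);
  return ++counter;
}

}  // namespace __sanitizer
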
@@ -20,6 +20,7 @@

 #include "sanitizer_common.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"

 namespace __sanitizer {

@@ -226,6 +227,42 @@ int internal_sched_yield() {
   return 0;
 }

+// ---------------------- BlockingMutex ---------------- {{{1
+enum LockState {
+  LOCK_UNINITIALIZED = 0,
+  LOCK_READY = -1,
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized li) {
+  // FIXME: see comments in BlockingMutex::Lock() for the details.
+  CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
+
+  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
+  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+  owner_ = LOCK_READY;
+}
+
+void BlockingMutex::Lock() {
+  if (owner_ == LOCK_UNINITIALIZED) {
+    // FIXME: hm, global BlockingMutex objects are not initialized?!?
+    // This might be a side effect of the clang+cl+link Frankenbuild...
+    new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
+
+    // FIXME: If it turns out the linker doesn't invoke our
+    // constructors, we should probably manually Lock/Unlock all the global
+    // locks while we're starting in one thread to avoid double-init races.
+  }
+  EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+  CHECK(owner_ == LOCK_READY);
+  owner_ = GetThreadSelf();
+}
+
+void BlockingMutex::Unlock() {
+  CHECK(owner_ == GetThreadSelf());
+  owner_ = LOCK_READY;
+  LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+}
+
 }  // namespace __sanitizer

 #endif  // _WIN32

@@ -7,6 +7,7 @@ set(SANITIZER_UNITTESTS
   sanitizer_flags_test.cc
   sanitizer_libc_test.cc
   sanitizer_list_test.cc
+  sanitizer_mutex_test.cc
   sanitizer_printf_test.cc
   sanitizer_stackdepot_test.cc
   sanitizer_test_main.cc

@@ -1,6 +1,6 @@
 DEBUG=0
 LDFLAGS=-ldl -lpthread -pie
-CXXFLAGS = -fPIE -g -Wall -Werror -DTSAN_DEBUG=$(DEBUG)
+CXXFLAGS = -fPIE -g -Wall -Werror -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
 # Silence warnings that Clang produces for gtest code.
 # Use -Wno-attributes so that gcc doesn't complain about unknown warning types.
 CXXFLAGS += -Wno-attributes

@@ -1,4 +1,4 @@
-CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG)
+CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
 ifeq ($(DEBUG), 0)
   CXXFLAGS += -O3
 endif