//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {
// AsanThreadContext implementation.
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
stack_id = StackDepotPut(*args->stack);
thread = args->thread;
thread->set_context(this);
}
void AsanThreadContext::OnFinished() {
// Drop the link to the AsanThread object.
thread = nullptr;
}
// MIPS requires an aligned address.
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;
static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
BlockingMutexLock lock(&mu_for_thread_context);
return new(allocator_for_thread_context) AsanThreadContext(tid);
}
ThreadRegistry &asanThreadRegistry() {
static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
asan_thread_registry =
new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
initialized = true;
}
return *asan_thread_registry;
}
AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
return static_cast<AsanThreadContext *>(
asanThreadRegistry().GetThreadLocked(tid));
}
// AsanThread implementation.
AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
u32 parent_tid, StackTrace *stack,
bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
parent_tid, &args);
return thread;
}
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
int tid = this->tid();
VReport(1, "T%d exited\n", tid);
bool was_running =
(asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
if (was_running) {
if (AsanThread *thread = GetCurrentThread())
CHECK_EQ(this, thread);
malloc_storage().CommitBack();
if (common_flags()->use_sigaltstack)
UnsetAlternateSignalStack();
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
DeleteFakeStack(tid);
} else {
CHECK_NE(this, GetCurrentThread());
}
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
if (was_running)
DTLS_Destroy();
}
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
uptr size) {
if (atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: starting fiber switch while in fiber switch\n");
Die();
}
next_stack_bottom_ = bottom;
next_stack_top_ = bottom + size;
atomic_store(&stack_switching_, 1, memory_order_release);
FakeStack *current_fake_stack = fake_stack_;
if (fake_stack_save)
*fake_stack_save = fake_stack_;
fake_stack_ = nullptr;
SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is being destroyed: delete the
  // fake stack.
if (!fake_stack_save && current_fake_stack)
current_fake_stack->Destroy(this->tid());
}
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
uptr *bottom_old,
uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
Die();
}
if (fake_stack_save) {
SetTLSFakeStack(fake_stack_save);
fake_stack_ = fake_stack_save;
}
if (bottom_old)
*bottom_old = stack_bottom_;
if (size_old)
*size_old = stack_top_ - stack_bottom_;
stack_bottom_ = next_stack_bottom_;
stack_top_ = next_stack_top_;
atomic_store(&stack_switching_, 0, memory_order_release);
next_stack_top_ = 0;
next_stack_bottom_ = 0;
}
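// A minimal sketch (hypothetical fiber library, not part of ASan) of how the
// two methods above are meant to be driven, via the public
// __sanitizer_start_switch_fiber/__sanitizer_finish_switch_fiber wrappers
// defined at the bottom of this file:
//
//   void SwitchTo(Fiber *cur, Fiber *next) {
//     void *fake_stack_save = nullptr;
//     // Announce the stack we are about to run on.
//     __sanitizer_start_switch_fiber(&fake_stack_save, next->stack_bottom,
//                                    next->stack_size);
//     swapcontext(&cur->ctx, &next->ctx);
//     // Back on this fiber's stack: restore its fake stack and bounds.
//     __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);
//   }
//
// Passing a null fake_stack_save to StartSwitchFiber means the current fiber
// is exiting for good, so its fake stack is destroyed rather than saved.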
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
if (!atomic_load(&stack_switching_, memory_order_acquire)) {
// Make sure the stack bounds are fully initialized.
if (stack_bottom_ >= stack_top_) return {0, 0};
return {stack_bottom_, stack_top_};
}
char local;
const uptr cur_stack = (uptr)&local;
  // Note: need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/bottom_. But in such a
  // case we are already on the next stack.
if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
return {next_stack_bottom_, next_stack_top_};
return {stack_bottom_, stack_top_};
}
uptr AsanThread::stack_top() {
return GetStackBounds().top;
}
uptr AsanThread::stack_bottom() {
return GetStackBounds().bottom;
}
uptr AsanThread::stack_size() {
const auto bounds = GetStackBounds();
return bounds.top - bounds.bottom;
}
// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known; additionally, the procedure has to be
// async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
uptr stack_size = this->stack_size();
if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
return nullptr;
uptr old_val = 0;
// fake_stack_ has 3 states:
// 0 -- not initialized
// 1 -- being initialized
// ptr -- initialized
  // This CAS checks if the state was 0 and, if so, changes it to state 1;
  // if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
stack_size_log =
Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
stack_size_log =
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
DCHECK_EQ(GetCurrentThread(), this);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
}
return nullptr;
}
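// The tri-state CAS used above is a general lock-free lazy-init pattern; a
// standalone sketch (generic names, not ASan code) of the same idea:
//
//   atomic_uintptr_t state;  // 0 = uninitialized, 1 = busy, else = pointer
//   T *LazyInit() {
//     uptr expected = 0;
//     if (atomic_compare_exchange_strong(&state, &expected, 1,
//                                        memory_order_relaxed)) {
//       T *p = new T;  // we won the 0 -> 1 race; create the object
//       atomic_store(&state, reinterpret_cast<uptr>(p), memory_order_release);
//       return p;
//     }
//     return nullptr;  // another caller is (or was) initializing
//   }
//
// Only the winner of the 0 -> 1 transition creates the object, so a signal
// handler that reenters LazyInit on the same thread safely gets nullptr
// instead of deadlocking.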
void AsanThread::Init(const InitOptions *options) {
DCHECK_NE(tid(), kInvalidTid);
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls(options);
if (stack_top_ != stack_bottom_) {
CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
}
ClearShadowForThreadStackAndTLS();
fake_stack_ = nullptr;
if (__asan_option_detect_stack_use_after_return &&
tid() == GetCurrentTidOrInvalid()) {
// AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
// called from the context of the thread it is initializing, not its parent.
// Most platforms call AsanThread::Init on the newly-spawned thread, but
// Fuchsia calls this function from the parent thread. To support that
// approach, we avoid calling AsyncSignalSafeLazyInitFakeStack here; it will
// be called by the new thread when it first attempts to access the fake
// stack.
AsyncSignalSafeLazyInitFakeStack();
}
int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local);
}
// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA
thread_return_t AsanThread::ThreadStart(tid_t os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
// OS X libdispatch worker threads. But nobody is supposed to call
// ThreadStart() for the worker threads.
CHECK_EQ(tid(), 0);
return 0;
}
thread_return_t res = start_routine_(arg_);
// On POSIX systems we defer this to the TSD destructor. LSan will consider
// the thread's memory as non-live from the moment we call Destroy(), even
// though that memory might contain pointers to heap objects which will be
// cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
// the TSD destructors have run might cause false positives in LSan.
if (!SANITIZER_POSIX)
this->Destroy();
return res;
}
AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
return main_thread;
}
// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_begin_, &tls_size);
stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
if (stack_top_ != stack_bottom_) {
int local;
CHECK(AddrIsInStack((uptr)&local));
}
}
#endif // !SANITIZER_FUCHSIA
void AsanThread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_) {
uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
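    // A worked example, assuming SHADOW_GRANULARITY is 8: for
    // tls_begin_ = 0x1000 and tls_end_ = 0x1016 we get
    // tls_begin_aligned = 0x1000 and tls_end_aligned = 0x1018, so the call
    // below zeroes the shadow of the full granules in [0x1000, 0x1010) and
    // encodes the last granule so that only its first 6 bytes are
    // addressable.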
FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
tls_end_ - tls_begin_aligned,
tls_end_aligned - tls_end_, 0);
}
}
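// GetStackFrameAccessByAddr below relies on the layout of an instrumented
// stack frame: the compiler stores kCurrentStackFrameMagic in the first word
// at the frame base, followed by a pointer to the frame description string
// and the function's PC, which is why the code indexes the frame words with
// [1] and [2].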
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
StackFrameAccess *access) {
if (stack_top_ == stack_bottom_)
return false;
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
} else if (FakeStack *fake_stack = get_fake_stack()) {
bottom = fake_stack->AddrIsInFakeStack(addr);
CHECK(bottom);
access->offset = addr - bottom;
access->frame_pc = ((uptr*)bottom)[2];
access->frame_descr = (const char *)((uptr*)bottom)[1];
return true;
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
mem_ptr -= SHADOW_GRANULARITY;
}
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr == kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
mem_ptr -= SHADOW_GRANULARITY;
}
if (shadow_ptr < shadow_bottom) {
return false;
}
uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];
access->frame_descr = (const char*)ptr[1];
return true;
}
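// Scans the shadow left from addr until it runs into a stack redzone marker
// and returns the shadow address just past it, i.e. the start of the shadow
// of the stack variable containing addr; returns 0 if addr is on neither the
// real nor the fake stack.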
uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
} else if (FakeStack *fake_stack = get_fake_stack()) {
bottom = fake_stack->AddrIsInFakeStack(addr);
if (bottom == 0) {
return 0;
}
} else {
return 0;
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
(*shadow_ptr != kAsanStackLeftRedzoneMagic &&
*shadow_ptr != kAsanStackMidRedzoneMagic &&
*shadow_ptr != kAsanStackRightRedzoneMagic))
shadow_ptr--;
return (uptr)shadow_ptr + 1;
}
bool AsanThread::AddrIsInStack(uptr addr) {
const auto bounds = GetStackBounds();
return addr >= bounds.bottom && addr < bounds.top;
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
void *addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
AsanThread *t = tctx->thread;
if (!t)
return false;
if (t->AddrIsInStack((uptr)addr))
return true;
FakeStack *fake_stack = t->get_fake_stack();
if (!fake_stack)
return false;
return fake_stack->AddrIsInFakeStack((uptr)addr);
}
AsanThread *GetCurrentThread() {
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (!context) {
if (SANITIZER_ANDROID) {
// On Android, libc constructor is called _after_ asan_init, and cleans up
// TSD. Try to figure out if this is still the main thread by the stack
// address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
}
return nullptr;
}
return context->thread;
}
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());
CHECK_EQ(t->context(), AsanTSDGet());
}
u32 GetCurrentTidOrInvalid() {
AsanThread *t = GetCurrentThread();
return t ? t->tid() : kInvalidTid;
}
AsanThread *FindThreadByStackAddress(uptr addr) {
asanThreadRegistry().CheckLocked();
AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
(void *)addr));
return tctx ? tctx->thread : nullptr;
}
void EnsureMainThreadIDIsCorrect() {
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (context && (context->tid == kMainTid))
context->os_id = GetTid();
}
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return nullptr;
return context->thread;
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t) return false;
*stack_begin = t->stack_bottom();
*stack_end = t->stack_top();
*tls_begin = t->tls_begin();
*tls_end = t->tls_end();
// ASan doesn't keep allocator caches in TLS, so these are unused.
*cache_begin = 0;
*cache_end = 0;
*dtls = t->dtls();
return true;
}
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t)
return;
__asan::FakeStack *fake_stack = t->get_fake_stack();
if (!fake_stack)
return;
fake_stack->ForEachFakeFrame(callback, arg);
}
void LockThreadRegistry() {
__asan::asanThreadRegistry().Lock();
}
void UnlockThreadRegistry() {
__asan::asanThreadRegistry().Unlock();
}
ThreadRegistry *GetThreadRegistryLocked() {
__asan::asanThreadRegistry().CheckLocked();
return &__asan::asanThreadRegistry();
}
void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
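// __sanitizer_start_switch_fiber and __sanitizer_finish_switch_fiber are the
// public entry points for the fiber-switching protocol implemented by
// AsanThread::StartSwitchFiber/FinishSwitchFiber above (see the usage sketch
// next to those methods). They are deliberately forgiving: on a thread ASan
// does not know about they log under verbosity >= 1 and return instead of
// aborting.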
using namespace __asan;
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
uptr size) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
return;
}
t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
const void **bottom_old,
uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
t->FinishSwitchFiber((FakeStack*)fakestack,
(uptr*)bottom_old,
(uptr*)size_old);
}
}