2013-05-20 19:06:50 +08:00
|
|
|
//=-- lsan_common_linux.cc ------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file is a part of LeakSanitizer.
|
|
|
|
// Implementation of common leak checking functionality. Linux-specific code.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "sanitizer_common/sanitizer_platform.h"
|
|
|
|
#include "lsan_common.h"
|
|
|
|
|
2013-05-21 22:12:11 +08:00
|
|
|
#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
|
2013-05-20 19:06:50 +08:00
|
|
|
#include <link.h>
|
|
|
|
|
|
|
|
#include "sanitizer_common/sanitizer_common.h"
|
2014-02-14 23:12:46 +08:00
|
|
|
#include "sanitizer_common/sanitizer_flags.h"
|
2013-05-20 19:06:50 +08:00
|
|
|
#include "sanitizer_common/sanitizer_linux.h"
|
|
|
|
#include "sanitizer_common/sanitizer_stackdepot.h"
|
|
|
|
|
|
|
|
namespace __lsan {
|
|
|
|
|
|
|
|
static const char kLinkerName[] = "ld";
|
2016-02-23 02:52:51 +08:00
|
|
|
|
|
|
|
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
|
2015-10-01 08:22:21 +08:00
|
|
|
static LoadedModule *linker = nullptr;
|
2013-05-20 19:06:50 +08:00
|
|
|
|
|
|
|
static bool IsLinker(const char* full_name) {
|
|
|
|
return LibraryNameIs(full_name, kLinkerName);
|
|
|
|
}
|
|
|
|
|
2017-02-17 11:23:07 +08:00
|
|
|
static THREADLOCAL u32 current_thread_tid = kInvalidTid;
|
|
|
|
u32 GetCurrentThread() { return current_thread_tid; }
|
|
|
|
void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
|
|
|
|
|
// NOTE: On Darwin, __thread is implemented via lazy calls to __tls_get_addr,
// which may itself call malloc() and recurse into the intercepting allocator;
// Darwin therefore keeps this state in pthread-specific keys instead. On
// Linux, __thread with the initial-exec TLS model (below) avoids
// __tls_get_addr entirely and is safe to use here.
|
|
|
__attribute__((tls_model("initial-exec")))
|
|
|
|
THREADLOCAL int disable_counter;
|
|
|
|
bool DisabledInThisThread() { return disable_counter > 0; }
|
|
|
|
void DisableInThisThread() { disable_counter++; }
|
|
|
|
void EnableInThisThread() {
|
|
|
|
if (disable_counter == 0) {
|
|
|
|
DisableCounterUnderflow();
|
|
|
|
}
|
|
|
|
disable_counter--;
|
|
|
|
}
|
|
|
|
|
2013-05-20 19:06:50 +08:00
|
|
|
void InitializePlatformSpecificModules() {
|
2016-02-23 02:52:51 +08:00
|
|
|
ListOfModules modules;
|
|
|
|
modules.init();
|
|
|
|
for (LoadedModule &module : modules) {
|
|
|
|
if (!IsLinker(module.full_name())) continue;
|
|
|
|
if (linker == nullptr) {
|
|
|
|
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
|
|
|
|
*linker = module;
|
|
|
|
module = LoadedModule();
|
|
|
|
} else {
|
|
|
|
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
|
|
|
|
"TLS will not be handled correctly.\n", kLinkerName);
|
|
|
|
linker->clear();
|
|
|
|
linker = nullptr;
|
|
|
|
return;
|
|
|
|
}
|
2013-05-20 19:06:50 +08:00
|
|
|
}
|
2016-02-23 02:52:51 +08:00
|
|
|
VReport(1, "LeakSanitizer: Dynamic linker not found. "
|
|
|
|
"TLS will not be handled correctly.\n");
|
2013-05-20 19:06:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
|
|
|
|
void *data) {
|
2013-06-24 16:34:50 +08:00
|
|
|
Frontier *frontier = reinterpret_cast<Frontier *>(data);
|
2013-05-20 19:06:50 +08:00
|
|
|
for (uptr j = 0; j < info->dlpi_phnum; j++) {
|
|
|
|
const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
|
|
|
|
// We're looking for .data and .bss sections, which reside in writeable,
|
|
|
|
// loadable segments.
|
|
|
|
if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
|
|
|
|
(phdr->p_memsz == 0))
|
|
|
|
continue;
|
|
|
|
uptr begin = info->dlpi_addr + phdr->p_vaddr;
|
|
|
|
uptr end = begin + phdr->p_memsz;
|
|
|
|
uptr allocator_begin = 0, allocator_end = 0;
|
|
|
|
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
|
|
|
|
if (begin <= allocator_begin && allocator_begin < end) {
|
|
|
|
CHECK_LE(allocator_begin, allocator_end);
|
2016-10-26 14:56:51 +08:00
|
|
|
CHECK_LE(allocator_end, end);
|
2013-05-20 19:06:50 +08:00
|
|
|
if (begin < allocator_begin)
|
|
|
|
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
|
|
|
|
kReachable);
|
|
|
|
if (allocator_end < end)
|
|
|
|
ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
|
|
|
|
kReachable);
|
|
|
|
} else {
|
|
|
|
ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-06-24 16:34:50 +08:00
|
|
|
// Scans global variables for heap pointers.
|
2013-06-14 18:07:56 +08:00
|
|
|
// Scans the global regions (.data/.bss) of all loaded modules for heap
// pointers, unless disabled via the use_globals flag.
void ProcessGlobalRegions(Frontier *frontier) {
  if (flags()->use_globals)
    dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
|
|
|
|
|
2013-08-26 21:24:43 +08:00
|
|
|
// Returns the PC of the frame that called the allocation function recorded in
// stack trace |stack_id|, or 0 when the trace is too short to tell.
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  return (stack.size >= 2) ? stack.trace[1] : 0;
}
|
|
|
|
|
2013-08-26 21:24:43 +08:00
|
|
|
struct ProcessPlatformAllocParam {
|
|
|
|
Frontier *frontier;
|
|
|
|
StackDepotReverseMap *stack_depot_reverse_map;
|
2016-01-15 03:16:05 +08:00
|
|
|
bool skip_linker_allocations;
|
2013-08-26 21:24:43 +08:00
|
|
|
};
|
|
|
|
|
2013-06-24 16:34:50 +08:00
|
|
|
// ForEachChunk callback. Identifies unreachable chunks which must be treated as
|
|
|
|
// reachable. Marks them as reachable and adds them to the frontier.
|
|
|
|
// ForEachChunk callback. Identifies unreachable chunks which must be treated
// as reachable anyway, marks them as such, and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  // Only unreachable, live, non-ignored chunks are candidates.
  if (!m.allocated() || m.tag() == kReachable || m.tag() == kIgnored)
    return;
  u32 stack_id = m.stack_trace_id();
  uptr caller_pc = 0;
  if (stack_id > 0)
    caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  if (caller_pc == 0 || (param->skip_linker_allocations &&
                         linker->containsAddress(caller_pc))) {
    m.set_tag(kReachable);
    param->frontier->push_back(chunk);
  }
}
|
|
|
|
|
2013-06-24 16:34:50 +08:00
|
|
|
// Handles dynamically allocated TLS blocks by treating all chunks allocated
|
|
|
|
// from ld-linux.so as reachable.
|
2014-01-23 23:10:35 +08:00
|
|
|
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
|
|
|
|
// They are allocated with a __libc_memalign() call in allocate_and_init()
|
|
|
|
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
|
|
|
|
// blocks, but we can make sure they come from our own allocator by intercepting
|
|
|
|
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
|
|
|
|
// addresses are stored in a dynamically allocated array (the DTV) which is
|
|
|
|
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
|
|
|
|
// being reachable from the static TLS, and the dynamic TLS being reachable from
|
|
|
|
// the DTV. This is because the initial DTV is allocated before our interception
|
|
|
|
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
|
|
|
|
// can't special-case it either, since we don't know its size.
|
|
|
|
// Our solution is to include in the root set all allocations made from
|
|
|
|
// ld-linux.so (which is where allocate_and_init() is implemented). This is
|
|
|
|
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
|
|
|
|
// which we don't care about).
|
2013-06-14 18:07:56 +08:00
|
|
|
// Walks every heap chunk and marks as reachable those that were allocated
// from the dynamic linker (dynamic TLS blocks) or lack a usable caller PC.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  // Linker-based skipping only makes sense when TLS handling is on and the
  // linker module was actually located at init time.
  const bool skip_linker =
      flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
  ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map,
                                   skip_linker};
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
|
|
|
|
|
2015-02-26 22:01:08 +08:00
|
|
|
struct DoStopTheWorldParam {
|
|
|
|
StopTheWorldCallback callback;
|
|
|
|
void *argument;
|
|
|
|
};
|
|
|
|
|
2015-02-26 22:25:25 +08:00
|
|
|
static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
|
|
|
|
void *data) {
|
2015-02-26 22:01:08 +08:00
|
|
|
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
|
|
|
|
StopTheWorld(param->callback, param->argument);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
|
|
|
|
// of the threads is frozen while holding the libdl lock, the tracer will hang
|
|
|
|
// in dl_iterate_phdr() forever.
|
|
|
|
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
|
|
|
|
// tracer task and the thread that spawned it. Thus, if we run the tracer task
|
|
|
|
// while holding the libdl lock in the parent thread, we can safely reenter it
|
|
|
|
// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
|
|
|
|
// callback in the parent thread.
|
|
|
|
// Suspends all threads, invoking |callback| from inside a dl_iterate_phdr()
// callback so the libdl lock is held by this thread (see comment above).
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
  DoStopTheWorldParam param;
  param.callback = callback;
  param.argument = argument;
  dl_iterate_phdr(DoStopTheWorldCallback, &param);
}
|
|
|
|
|
2015-10-01 08:22:21 +08:00
|
|
|
} // namespace __lsan
|
|
|
|
|
|
|
|
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
|