forked from OSchip/llvm-project
Move valid caller-pc checks out of platform-specific checks
Summary: ProcessPlatformSpecificAllocations for the Linux leak sanitizer iterated over memory chunks and ran two checks concurrently: 1) it ensured the PC was valid, and 2) it checked whether the chunk was a linker allocation. All platforms need the valid-PC check, so it is moved out of the platform-specific file. To prevent duplication of code and logic, the linker-allocation check is moved as well, with the linker module supplied by the platform-specific code. In cases where we don't need to check for linker allocations (i.e., Darwin), this module will be a nullptr, and we'll only run the caller-PC checks. Reviewers: kubamracek, alekseyshl, kcc Subscribers: llvm-commits Differential Revision: https://reviews.llvm.org/D32130 llvm-svn: 300690
This commit is contained in:
parent
8ea76fa9b4
commit
2096fa4bf9
|
@ -356,6 +356,72 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Looks up the stack trace recorded under |stack_id| and returns the PC of
// the frame that called the allocation function, or 0 when no caller frame
// was captured.
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // Frame 0 is our malloc/calloc/etc. interceptor; frame 1 is its caller.
  // A shorter trace carries no caller information.
  if (stack.size < 2)
    return 0;
  return stack.trace[1];
}
|
||||||
|
|
||||||
|
struct InvalidPCParam {
|
||||||
|
Frontier *frontier;
|
||||||
|
StackDepotReverseMap *stack_depot_reverse_map;
|
||||||
|
bool skip_linker_allocations;
|
||||||
|
};
|
||||||
|
|
||||||
|
// ForEachChunk callback. If the caller pc is invalid or is within the linker,
|
||||||
|
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
|
||||||
|
static void MarkInvalidPCCb(uptr chunk, void *arg) {
|
||||||
|
CHECK(arg);
|
||||||
|
InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
|
||||||
|
chunk = GetUserBegin(chunk);
|
||||||
|
LsanMetadata m(chunk);
|
||||||
|
if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
|
||||||
|
u32 stack_id = m.stack_trace_id();
|
||||||
|
uptr caller_pc = 0;
|
||||||
|
if (stack_id > 0)
|
||||||
|
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
|
||||||
|
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
|
||||||
|
// it as reachable, as we can't properly report its allocation stack anyway.
|
||||||
|
if (caller_pc == 0 || (param->skip_linker_allocations &&
|
||||||
|
GetLinker()->containsAddress(caller_pc))) {
|
||||||
|
m.set_tag(kReachable);
|
||||||
|
param->frontier->push_back(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// On Linux, handles dynamically allocated TLS blocks by treating all chunks
|
||||||
|
// allocated from ld-linux.so as reachable.
|
||||||
|
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
|
||||||
|
// They are allocated with a __libc_memalign() call in allocate_and_init()
|
||||||
|
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
|
||||||
|
// blocks, but we can make sure they come from our own allocator by intercepting
|
||||||
|
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
|
||||||
|
// addresses are stored in a dynamically allocated array (the DTV) which is
|
||||||
|
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
|
||||||
|
// being reachable from the static TLS, and the dynamic TLS being reachable from
|
||||||
|
// the DTV. This is because the initial DTV is allocated before our interception
|
||||||
|
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
|
||||||
|
// can't special-case it either, since we don't know its size.
|
||||||
|
// Our solution is to include in the root set all allocations made from
|
||||||
|
// ld-linux.so (which is where allocate_and_init() is implemented). This is
|
||||||
|
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
|
||||||
|
// which we don't care about).
|
||||||
|
// On all other platforms, this simply checks to ensure that the caller pc is
|
||||||
|
// valid before reporting chunks as leaked.
|
||||||
|
void ProcessPC(Frontier *frontier) {
|
||||||
|
StackDepotReverseMap stack_depot_reverse_map;
|
||||||
|
InvalidPCParam arg;
|
||||||
|
arg.frontier = frontier;
|
||||||
|
arg.stack_depot_reverse_map = &stack_depot_reverse_map;
|
||||||
|
arg.skip_linker_allocations =
|
||||||
|
flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
|
||||||
|
ForEachChunk(MarkInvalidPCCb, &arg);
|
||||||
|
}
|
||||||
|
|
||||||
// Sets the appropriate tag on each chunk.
|
// Sets the appropriate tag on each chunk.
|
||||||
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
|
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
|
||||||
// Holds the flood fill frontier.
|
// Holds the flood fill frontier.
|
||||||
|
@ -367,11 +433,13 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
|
||||||
ProcessRootRegions(&frontier);
|
ProcessRootRegions(&frontier);
|
||||||
FloodFillTag(&frontier, kReachable);
|
FloodFillTag(&frontier, kReachable);
|
||||||
|
|
||||||
|
CHECK_EQ(0, frontier.size());
|
||||||
|
ProcessPC(&frontier);
|
||||||
|
|
||||||
// The check here is relatively expensive, so we do this in a separate flood
|
// The check here is relatively expensive, so we do this in a separate flood
|
||||||
// fill. That way we can skip the check for chunks that are reachable
|
// fill. That way we can skip the check for chunks that are reachable
|
||||||
// otherwise.
|
// otherwise.
|
||||||
LOG_POINTERS("Processing platform-specific allocations.\n");
|
LOG_POINTERS("Processing platform-specific allocations.\n");
|
||||||
CHECK_EQ(0, frontier.size());
|
|
||||||
ProcessPlatformSpecificAllocations(&frontier);
|
ProcessPlatformSpecificAllocations(&frontier);
|
||||||
FloodFillTag(&frontier, kReachable);
|
FloodFillTag(&frontier, kReachable);
|
||||||
|
|
||||||
|
|
|
@ -212,6 +212,10 @@ uptr PointsIntoChunk(void *p);
|
||||||
uptr GetUserBegin(uptr chunk);
|
uptr GetUserBegin(uptr chunk);
|
||||||
// Helper for __lsan_ignore_object().
|
// Helper for __lsan_ignore_object().
|
||||||
IgnoreObjectResult IgnoreObjectLocked(const void *p);
|
IgnoreObjectResult IgnoreObjectLocked(const void *p);
|
||||||
|
|
||||||
|
// Return the linker module, if valid for the platform.
|
||||||
|
LoadedModule *GetLinker();
|
||||||
|
|
||||||
// Wrapper for chunk metadata operations.
|
// Wrapper for chunk metadata operations.
|
||||||
class LsanMetadata {
|
class LsanMetadata {
|
||||||
public:
|
public:
|
||||||
|
|
|
@ -89,70 +89,9 @@ void ProcessGlobalRegions(Frontier *frontier) {
|
||||||
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
|
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
|
// Linux: exposes the file-scope |linker| module to platform-independent code
// so it can treat linker-owned allocations (dynamic TLS) as reachable.
// NOTE(review): |linker| is set up elsewhere in this file — may be null if
// the dynamic linker was not found; callers must handle that.
LoadedModule *GetLinker() { return linker; }
|
||||||
CHECK(stack_id);
|
|
||||||
StackTrace stack = map->Get(stack_id);
|
|
||||||
// The top frame is our malloc/calloc/etc. The next frame is the caller.
|
|
||||||
if (stack.size >= 2)
|
|
||||||
return stack.trace[1];
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ProcessPlatformAllocParam {
|
// Intentionally empty: the caller-PC and linker-allocation checks that used
// to live here moved to the platform-independent code (see ProcessPC), so
// Linux needs no additional platform-specific chunk handling.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
|
||||||
Frontier *frontier;
|
|
||||||
StackDepotReverseMap *stack_depot_reverse_map;
|
|
||||||
bool skip_linker_allocations;
|
|
||||||
};
|
|
||||||
|
|
||||||
// ForEachChunk callback. Identifies unreachable chunks which must be treated as
|
|
||||||
// reachable. Marks them as reachable and adds them to the frontier.
|
|
||||||
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
|
|
||||||
CHECK(arg);
|
|
||||||
ProcessPlatformAllocParam *param =
|
|
||||||
reinterpret_cast<ProcessPlatformAllocParam *>(arg);
|
|
||||||
chunk = GetUserBegin(chunk);
|
|
||||||
LsanMetadata m(chunk);
|
|
||||||
if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
|
|
||||||
u32 stack_id = m.stack_trace_id();
|
|
||||||
uptr caller_pc = 0;
|
|
||||||
if (stack_id > 0)
|
|
||||||
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
|
|
||||||
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
|
|
||||||
// it as reachable, as we can't properly report its allocation stack anyway.
|
|
||||||
if (caller_pc == 0 || (param->skip_linker_allocations &&
|
|
||||||
linker->containsAddress(caller_pc))) {
|
|
||||||
m.set_tag(kReachable);
|
|
||||||
param->frontier->push_back(chunk);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handles dynamically allocated TLS blocks by treating all chunks allocated
|
|
||||||
// from ld-linux.so as reachable.
|
|
||||||
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
|
|
||||||
// They are allocated with a __libc_memalign() call in allocate_and_init()
|
|
||||||
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
|
|
||||||
// blocks, but we can make sure they come from our own allocator by intercepting
|
|
||||||
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
|
|
||||||
// addresses are stored in a dynamically allocated array (the DTV) which is
|
|
||||||
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
|
|
||||||
// being reachable from the static TLS, and the dynamic TLS being reachable from
|
|
||||||
// the DTV. This is because the initial DTV is allocated before our interception
|
|
||||||
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
|
|
||||||
// can't special-case it either, since we don't know its size.
|
|
||||||
// Our solution is to include in the root set all allocations made from
|
|
||||||
// ld-linux.so (which is where allocate_and_init() is implemented). This is
|
|
||||||
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
|
|
||||||
// which we don't care about).
|
|
||||||
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
|
||||||
StackDepotReverseMap stack_depot_reverse_map;
|
|
||||||
ProcessPlatformAllocParam arg;
|
|
||||||
arg.frontier = frontier;
|
|
||||||
arg.stack_depot_reverse_map = &stack_depot_reverse_map;
|
|
||||||
arg.skip_linker_allocations =
|
|
||||||
flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
|
|
||||||
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct DoStopTheWorldParam {
|
struct DoStopTheWorldParam {
|
||||||
StopTheWorldCallback callback;
|
StopTheWorldCallback callback;
|
||||||
|
|
|
@ -87,6 +87,8 @@ void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
|
||||||
|
|
||||||
AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
|
AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
|
||||||
|
|
||||||
|
// Darwin: no linker module is exposed, so the common code skips the
// linker-allocation check and only validates caller PCs.
LoadedModule *GetLinker() { return nullptr; }
|
||||||
|
|
||||||
// Required on Linux for initialization of TLS behavior, but should not be
|
// Required on Linux for initialization of TLS behavior, but should not be
|
||||||
// required on Darwin.
|
// required on Darwin.
|
||||||
void InitializePlatformSpecificModules() {
|
void InitializePlatformSpecificModules() {
|
||||||
|
|
Loading…
Reference in New Issue