forked from OSchip/llvm-project
Revert to C-style callbacks for iteration over allocator chunks.
Also clean up LSan code, fix some comments and replace void* with uptr to bring down the number of reinterpret_casts.

llvm-svn: 184700
commit 4e0215a71c
parent 20bbbd30d2
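The change is easiest to see as a pattern: instead of templated functors that every allocator had to explicitly instantiate, iteration now takes a plain function pointer plus an opaque void* context argument. Below is a minimal, self-contained sketch of that style; the ForEachChunkCallback signature mirrors the typedef this patch adds to sanitizer_allocator.h, while ToyAllocator, CollectChunksCb, and the local uptr typedef are illustrative stand-ins, not code from the patch.

#include <set>

// Stand-in for sanitizer_internal_defs.h's uptr (assumption for this sketch).
typedef unsigned long uptr;

// Same shape as the callback type introduced by this commit:
// a chunk address plus an opaque context pointer.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

// Hypothetical allocator exposing the C-style iteration interface.
struct ToyAllocator {
  uptr chunks[3];
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (int i = 0; i < 3; i++) callback(chunks[i], arg);
  }
};

// Callback state travels through the void *arg pointer instead of
// functor member fields, so no template instantiations are needed.
static void CollectChunksCb(uptr chunk, void *arg) {
  static_cast<std::set<uptr> *>(arg)->insert(chunk);
}

int main() {
  ToyAllocator a = {{0x1000, 0x2000, 0x3000}};
  std::set<uptr> seen;
  a.ForEachChunk(CollectChunksCb, &seen);
  return seen.size() == 3 ? 0 : 1;
}

The real call sites in the diff below follow the same shape, e.g. ForEachChunk(CollectIgnoredCb, &frontier) and allocator.ForEachChunk(callback, arg).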
@@ -718,26 +718,25 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
   *end = *begin + sizeof(__asan::allocator);
 }
 
-void *PointsIntoChunk(void* p) {
+uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
   if (!m) return 0;
   uptr chunk = m->Beg();
   if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
-    return reinterpret_cast<void *>(chunk);
+    return chunk;
   return 0;
 }
 
-void *GetUserBegin(void *p) {
+uptr GetUserBegin(uptr chunk) {
   __asan::AsanChunk *m =
-      __asan::GetAsanChunkByAddrFastLocked(reinterpret_cast<uptr>(p));
+      __asan::GetAsanChunkByAddrFastLocked(chunk);
   CHECK(m);
-  return reinterpret_cast<void *>(m->Beg());
+  return m->Beg();
 }
 
-LsanMetadata::LsanMetadata(void *chunk) {
-  uptr addr = reinterpret_cast<uptr>(chunk);
-  metadata_ = reinterpret_cast<void *>(addr - __asan::kChunkHeaderSize);
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
 }
 
 bool LsanMetadata::allocated() const {
@@ -765,19 +764,9 @@ u32 LsanMetadata::stack_trace_id() const {
   return m->alloc_context_id;
 }
 
-template <typename Callable> void ForEachChunk(Callable const &callback) {
-  __asan::allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __asan::allocator.ForEachChunk(callback, arg);
 }
-#if CAN_SANITIZE_LEAKS
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
-    ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
-    MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
-    CollectIgnoredCb const &callback);
-#endif // CAN_SANITIZE_LEAKS
 
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   uptr addr = reinterpret_cast<uptr>(p);
@@ -12,7 +12,7 @@
 
 pthread_key_t key;
 
-void key_destructor(void *) {
+void key_destructor(void *arg) {
   __lsan::ScopedDisabler d;
   void *p = malloc(1337);
   // Break optimization.
@@ -52,7 +52,7 @@ void AllocatorThreadFinish() {
 }
 
 static ChunkMetadata *Metadata(void *p) {
-  return (ChunkMetadata *)allocator.GetMetaData(p);
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
 }
 
 static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
@@ -62,14 +62,14 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
   m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
   m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
   m->requested_size = size;
-  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
 }
 
 static void RegisterDeallocation(void *p) {
   if (!p) return;
   ChunkMetadata *m = Metadata(p);
   CHECK(m);
-  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
 }
 
 void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
@@ -129,25 +129,26 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
   *end = *begin + sizeof(allocator);
 }
 
-void *PointsIntoChunk(void* p) {
-  void *chunk = allocator.GetBlockBeginFastLocked(p);
+uptr PointsIntoChunk(void* p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
   if (!chunk) return 0;
   // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
   // valid, but we don't want that.
-  if (p < chunk) return 0;
-  ChunkMetadata *m = Metadata(chunk);
+  if (addr < chunk) return 0;
+  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
   CHECK(m);
-  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
+  if (m->allocated && addr < chunk + m->requested_size)
     return chunk;
   return 0;
 }
 
-void *GetUserBegin(void *p) {
-  return p;
+uptr GetUserBegin(uptr chunk) {
+  return chunk;
 }
 
-LsanMetadata::LsanMetadata(void *chunk) {
-  metadata_ = Metadata(chunk);
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
   CHECK(metadata_);
 }
 
@@ -171,20 +172,10 @@ u32 LsanMetadata::stack_trace_id() const {
   return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
 }
 
-template<typename Callable>
-void ForEachChunk(Callable const &callback) {
-  allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  allocator.ForEachChunk(callback, arg);
 }
 
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
-    ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
-    MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
-    CollectIgnoredCb const &callback);
-
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   void *chunk = allocator.GetBlockBegin(p);
   if (!chunk || p < chunk) return kIgnoreObjectInvalid;
@@ -23,7 +23,7 @@
 #if CAN_SANITIZE_LEAKS
 namespace __lsan {
 
-// This mutex is used to prevent races between DoLeakCheck and SuppressObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
 BlockingMutex global_mutex(LINKER_INITIALIZED);
 
 THREADLOCAL int disable_counter;
@@ -84,12 +84,12 @@ static inline bool CanBeAHeapPointer(uptr p) {
 #endif
 }
 
-// Scan the memory range, looking for byte patterns that point into allocator
-// chunks. Mark those chunks with tag and add them to the frontier.
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
 // There are two usage modes for this function: finding reachable or ignored
-// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
-// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
-// so frontier = 0.
+// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
 void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag) {
@@ -99,10 +99,10 @@ void ScanRangeForPointers(uptr begin, uptr end,
   uptr pp = begin;
   if (pp % alignment)
     pp = pp + alignment - pp % alignment;
-  for (; pp + sizeof(void *) <= end; pp += alignment) {
+  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
     void *p = *reinterpret_cast<void**>(pp);
     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
-    void *chunk = PointsIntoChunk(p);
+    uptr chunk = PointsIntoChunk(p);
     if (!chunk) continue;
     LsanMetadata m(chunk);
     // Reachable beats ignored beats leaked.
@@ -111,14 +111,13 @@ void ScanRangeForPointers(uptr begin, uptr end,
     m.set_tag(tag);
     if (flags()->log_pointers)
       Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
-             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
-             m.requested_size());
+             chunk, chunk + m.requested_size(), m.requested_size());
     if (frontier)
-      frontier->push_back(reinterpret_cast<uptr>(chunk));
+      frontier->push_back(chunk);
   }
 }
 
-// Scan thread data (stacks and TLS) for heap pointers.
+// Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                            Frontier *frontier) {
   InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
@@ -191,31 +190,34 @@ static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
   while (frontier->size()) {
     uptr next_chunk = frontier->back();
     frontier->pop_back();
-    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
+    LsanMetadata m(next_chunk);
     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                          "HEAP", tag);
   }
 }
 
-// Mark leaked chunks which are reachable from other leaked chunks.
-void MarkIndirectlyLeakedCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
-    ScanRangeForPointers(reinterpret_cast<uptr>(p),
-                         reinterpret_cast<uptr>(p) + m.requested_size(),
+    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                          /* frontier */ 0, "HEAP", kIndirectlyLeaked);
   }
 }
 
-void CollectIgnoredCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() == kIgnored)
-    frontier_->push_back(reinterpret_cast<uptr>(p));
+    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
 }
 
-// Set the appropriate tag on each chunk.
+// Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
   Frontier frontier(GetPageSizeCached());
@@ -233,14 +235,14 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   if (flags()->log_pointers)
     Report("Scanning ignored chunks.\n");
   CHECK_EQ(0, frontier.size());
-  ForEachChunk(CollectIgnoredCb(&frontier));
+  ForEachChunk(CollectIgnoredCb, &frontier);
   FloodFillTag(&frontier, kIgnored);
 
   // Iterate over leaked chunks and mark those that are reachable from other
   // leaked chunks.
   if (flags()->log_pointers)
     Report("Scanning leaked chunks.\n");
-  ForEachChunk(MarkIndirectlyLeakedCb());
+  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
 }
 
 static void PrintStackTraceById(u32 stack_trace_id) {
@@ -251,9 +253,12 @@ static void PrintStackTraceById(u32 stack_trace_id) {
                          common_flags()->strip_path_prefix, 0);
 }
 
-void CollectLeaksCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     uptr resolution = flags()->resolution;
@@ -261,33 +266,29 @@ void CollectLeaksCb::operator()(void *p) const {
       uptr size = 0;
       const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
       size = Min(size, resolution);
-      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
-                        m.tag());
+      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
     } else {
-      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
     }
   }
 }
 
-static void CollectLeaks(LeakReport *leak_report) {
-  ForEachChunk(CollectLeaksCb(leak_report));
-}
-
-void PrintLeakedCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunkCallback. Prints addresses of unreachable chunks.
+static void PrintLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     Printf("%s leaked %zu byte object at %p.\n",
            m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
-           m.requested_size(), p);
+           m.requested_size(), chunk);
   }
 }
 
 static void PrintLeaked() {
   Printf("\n");
   Printf("Reporting individual objects:\n");
-  ForEachChunk(PrintLeakedCb());
+  ForEachChunk(PrintLeakedCb, 0 /* arg */);
 }
 
 struct DoLeakCheckParam {
@@ -302,7 +303,7 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(!param->success);
   CHECK(param->leak_report.IsEmpty());
   ClassifyAllChunks(suspended_threads);
-  CollectLeaks(&param->leak_report);
+  ForEachChunk(CollectLeaksCb, &param->leak_report);
   if (!param->leak_report.IsEmpty() && flags()->report_objects)
     PrintLeaked();
   param->success = true;
@@ -15,6 +15,7 @@
 #ifndef LSAN_COMMON_H
 #define LSAN_COMMON_H
 
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_platform.h"
@@ -105,55 +106,6 @@ void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag);
 
-// Callables for iterating over chunks. Those classes are used as template
-// parameters in ForEachChunk, so we must expose them here to allow for explicit
-// template instantiation.
-
-// Identifies unreachable chunks which must be treated as reachable. Marks them
-// as reachable and adds them to the frontier.
-class ProcessPlatformSpecificAllocationsCb {
- public:
-  explicit ProcessPlatformSpecificAllocationsCb(
-      Frontier *frontier)
-      : frontier_(frontier) {}
-  void operator()(void *p) const;
- private:
-  Frontier *frontier_;
-};
-
-// Prints addresses of unreachable chunks.
-class PrintLeakedCb {
- public:
-  void operator()(void *p) const;
-};
-
-// Aggregates unreachable chunks into a LeakReport.
-class CollectLeaksCb {
- public:
-  explicit CollectLeaksCb(LeakReport *leak_report)
-      : leak_report_(leak_report) {}
-  void operator()(void *p) const;
- private:
-  LeakReport *leak_report_;
-};
-
-// Scans each leaked chunk for pointers to other leaked chunks, and marks each
-// of them as indirectly leaked.
-class MarkIndirectlyLeakedCb {
- public:
-  void operator()(void *p) const;
-};
-
-// Finds all chunk marked as kIgnored and adds their addresses to frontier.
-class CollectIgnoredCb {
- public:
-  explicit CollectIgnoredCb(Frontier *frontier)
-      : frontier_(frontier) {}
-  void operator()(void *p) const;
- private:
-  Frontier *frontier_;
-};
-
 enum IgnoreObjectResult {
   kIgnoreObjectSuccess,
   kIgnoreObjectAlreadyIgnored,
@@ -167,8 +119,8 @@ bool DisabledInThisThread();
 
 // The following must be implemented in the parent tool.
 
-template<typename Callable> void ForEachChunk(Callable const &callback);
-// The address range occupied by the global allocator object.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+// Returns the address range occupied by the global allocator object.
 void GetAllocatorGlobalRange(uptr *begin, uptr *end);
 // Wrappers for allocator's ForceLock()/ForceUnlock().
 void LockAllocator();
@@ -179,18 +131,18 @@ void UnlockThreadRegistry();
 bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end,
                            uptr *cache_begin, uptr *cache_end);
-// If p points into a chunk that has been allocated to the user, return its
-// user-visible address. Otherwise, return 0.
-void *PointsIntoChunk(void *p);
-// Return address of user-visible chunk contained in this allocator chunk.
-void *GetUserBegin(void *p);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
 // Helper for __lsan_ignore_object().
 IgnoreObjectResult IgnoreObjectLocked(const void *p);
 // Wrapper for chunk metadata operations.
 class LsanMetadata {
  public:
-  // Constructor accepts pointer to user-visible chunk.
-  explicit LsanMetadata(void *chunk);
+  // Constructor accepts address of user-visible chunk.
+  explicit LsanMetadata(uptr chunk);
   bool allocated() const;
   ChunkTag tag() const;
   void set_tag(ChunkTag value);
@@ -53,8 +53,7 @@ void InitializePlatformSpecificModules() {
 
 static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                         void *data) {
-  Frontier *frontier =
-      reinterpret_cast<Frontier *>(data);
+  Frontier *frontier = reinterpret_cast<Frontier *>(data);
   for (uptr j = 0; j < info->dlpi_phnum; j++) {
     const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
     // We're looking for .data and .bss sections, which reside in writeable,
@@ -82,7 +81,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
   return 0;
 }
 
-// Scan global variables for heap pointers.
+// Scans global variables for heap pointers.
 void ProcessGlobalRegions(Frontier *frontier) {
   // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
   // deadlocking by running this under StopTheWorld. However, the lock is
@@ -101,23 +100,26 @@ static uptr GetCallerPC(u32 stack_id) {
   return 0;
 }
 
-void ProcessPlatformSpecificAllocationsCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. Identifies unreachable chunks which must be treated as
+// reachable. Marks them as reachable and adds them to the frontier.
+static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
     if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) {
       m.set_tag(kReachable);
-      frontier_->push_back(reinterpret_cast<uptr>(p));
+      reinterpret_cast<Frontier *>(arg)->push_back(chunk);
    }
  }
 }
 
-// Handle dynamically allocated TLS blocks by treating all chunks allocated from
-// ld-linux.so as reachable.
+// Handles dynamically allocated TLS blocks by treating all chunks allocated
+// from ld-linux.so as reachable.
 void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   if (!flags()->use_tls) return;
   if (!linker) return;
-  ForEachChunk(ProcessPlatformSpecificAllocationsCb(frontier));
+  ForEachChunk(ProcessPlatformSpecificAllocationsCb, frontier);
 }
 
 } // namespace __lsan
@@ -279,6 +279,9 @@ struct NoOpMapUnmapCallback {
   void OnUnmap(uptr p, uptr size) const { }
 };
 
+// Callback type for iterating over chunks.
+typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
+
 // SizeClassAllocator64 -- allocator for 64-bit address space.
 //
 // Space: a portion of address space of kSpaceSize bytes starting at
@@ -433,20 +436,18 @@ class SizeClassAllocator64 {
     }
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
-      for (uptr p = region_beg;
-           p < region_beg + region->allocated_user;
-           p += chunk_size) {
-        // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
-        callback((void *)p);
+      for (uptr chunk = region_beg;
+           chunk < region_beg + region->allocated_user;
+           chunk += chunk_size) {
+        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+        callback(chunk, arg);
      }
    }
  }
@@ -726,21 +727,19 @@ class SizeClassAllocator32 {
    }
  }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
-        for (uptr p = region_beg;
-             p < region_beg + max_chunks_in_region * chunk_size;
-             p += chunk_size) {
-          // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
-          callback((void *)p);
+        for (uptr chunk = region_beg;
+             chunk < region_beg + max_chunks_in_region * chunk_size;
+             chunk += chunk_size) {
+          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+          callback(chunk, arg);
        }
      }
  }
@@ -1108,13 +1107,11 @@ class LargeMmapAllocator {
    mutex_.Unlock();
  }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
-      callback(GetUser(chunks_[i]));
+      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }
 
  private:
@@ -1290,13 +1287,11 @@ class CombinedAllocator {
    primary_.ForceUnlock();
  }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
-    primary_.ForEachChunk(callback);
-    secondary_.ForEachChunk(callback);
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    primary_.ForEachChunk(callback, arg);
+    secondary_.ForEachChunk(callback, arg);
  }
 
  private:
@@ -635,15 +635,8 @@ TEST(Allocator, ScopedBuffer) {
   }
 }
 
-class IterationTestCallback {
- public:
-  explicit IterationTestCallback(std::set<void *> *chunks)
-      : chunks_(chunks) {}
-  void operator()(void *chunk) const {
-    chunks_->insert(chunk);
-  }
- private:
-  std::set<void *> *chunks_;
+void IterationTestCallback(uptr chunk, void *arg) {
+  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
 };
 
 template <class Allocator>
@@ -673,15 +666,15 @@ void TestSizeClassAllocatorIteration() {
     }
   }
 
-  std::set<void *> reported_chunks;
-  IterationTestCallback callback(&reported_chunks);
+  std::set<uptr> reported_chunks;
   a->ForceLock();
-  a->ForEachChunk(callback);
+  a->ForEachChunk(IterationTestCallback, &reported_chunks);
   a->ForceUnlock();
 
   for (uptr i = 0; i < allocated.size(); i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
-    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
+              reported_chunks.end());
   }
 
   a->TestOnlyUnmap();
@@ -711,15 +704,15 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
   for (uptr i = 0; i < kNumAllocs; i++)
     allocated[i] = (char *)a.Allocate(&stats, size, 1);
 
-  std::set<void *> reported_chunks;
-  IterationTestCallback callback(&reported_chunks);
+  std::set<uptr> reported_chunks;
   a.ForceLock();
-  a.ForEachChunk(callback);
+  a.ForEachChunk(IterationTestCallback, &reported_chunks);
   a.ForceUnlock();
 
   for (uptr i = 0; i < kNumAllocs; i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
-    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
+              reported_chunks.end());
   }
   for (uptr i = 0; i < kNumAllocs; i++)
     a.Deallocate(&stats, allocated[i]);