[lsan] Ignore indirect leaks referenced by suppressed blocks

This makes the suppression list work similarly to __lsan_ignore_object.

Existing behavior was inconsistent and very inconvenient for complex
data structures.

Example:

struct B;
struct A { B* ptr; };
A* t = makeA();
t->ptr = makeB();

Before the patch: if makeA is suppressed by the suppression file, LSan will
still report the makeB() leak, so we need two suppressions.

After the patch: a single makeA suppression is enough (the same as a
single __lsan_ignore_object(t)).

Differential Revision: https://reviews.llvm.org/D93884
This commit is contained in:
Vitaly Buka 2020-12-28 19:19:38 -08:00
parent 6be0b9a8dd
commit 9a0237011b
3 changed files with 91 additions and 23 deletions

View File

@ -68,9 +68,11 @@ void RegisterLsanFlags(FlagParser *parser, Flags *f) {
class LeakSuppressionContext {
bool parsed = false;
SuppressionContext context;
bool suppressed_stacks_sorted = true;
InternalMmapVector<u32> suppressed_stacks;
Suppression *GetSuppressionForAddr(uptr addr);
void LazyParse();
void LazyInit();
public:
LeakSuppressionContext(const char *supprression_types[],
@ -78,6 +80,14 @@ class LeakSuppressionContext {
: context(supprression_types, suppression_types_num) {}
Suppression *GetSuppressionForStack(u32 stack_trace_id);
const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
if (!suppressed_stacks_sorted) {
suppressed_stacks_sorted = true;
SortAndDedup(suppressed_stacks);
}
return suppressed_stacks;
}
void PrintMatchedSuppressions();
};
@ -105,7 +115,7 @@ void InitializeSuppressions() {
LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}
void LeakSuppressionContext::LazyParse() {
void LeakSuppressionContext::LazyInit() {
if (!parsed) {
parsed = true;
context.ParseFromFile(flags()->suppressions);
@ -412,6 +422,24 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
}
}
// ForEachChunk callback. Marks a chunk kIgnored when its allocation stack id
// is present in the sorted vector of suppressed stack ids passed via |arg|,
// so that pointers held inside suppressed blocks do not get reported as
// indirect leaks.
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  const InternalMmapVector<u32> &stacks =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;
  // Binary search: |stacks| is kept sorted and deduped by the caller.
  uptr pos = InternalLowerBound(stacks, m.stack_trace_id());
  if (pos >= stacks.size() || stacks[pos] != m.stack_trace_id())
    return;
  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", chunk,
               chunk + m.requested_size(), m.requested_size());
  m.set_tag(kIgnored);
}
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
@ -495,6 +523,12 @@ void ProcessPC(Frontier *frontier) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
const InternalMmapVector<u32> &suppressed_stacks =
GetSuppressionContext()->GetSortedSuppressedStacks();
if (!suppressed_stacks.empty()) {
ForEachChunk(IgnoredSuppressedCb,
const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
}
ForEachChunk(CollectIgnoredCb, frontier);
ProcessGlobalRegions(frontier);
ProcessThreads(suspended_threads, frontier);
@ -643,21 +677,41 @@ static bool PrintResults(LeakReport &report) {
// Performs a leak check, rerunning it while new suppressed stacks keep being
// discovered (each rerun lets IgnoredSuppressedCb also ignore blocks that are
// only reachable from freshly suppressed blocks). Returns true if any
// unsuppressed leaks were found.
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!param.leak_report.ApplySuppressions())
      return PrintResults(param.leak_report);

    // No indirect leaks to report, so we are done here.
    if (!param.leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(param.leak_report);

    // Bail out after a bounded number of reruns to guarantee termination.
    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(param.leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}
static bool has_reported_leaks = false;
@ -703,13 +757,16 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
// Returns the suppression matching any frame of the given stack, or nullptr.
// On a match the stack id is recorded in |suppressed_stacks| so later leak
// checks can ignore blocks (and their indirect leaks) allocated from it.
Suppression *LeakSuppressionContext::GetSuppressionForStack(
    u32 stack_trace_id) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      // Mark the cached vector stale; GetSortedSuppressedStacks re-sorts it.
      suppressed_stacks_sorted = false;
      suppressed_stacks.push_back(stack_trace_id);
      return s;
    }
  }
  return nullptr;
}
@ -820,8 +877,9 @@ void LeakReport::PrintSummary() {
ReportErrorSummary(summary.data());
}
void LeakReport::ApplySuppressions() {
uptr LeakReport::ApplySuppressions() {
LeakSuppressionContext *suppressions = GetSuppressionContext();
uptr new_suppressions = false;
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s =
suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
@ -830,8 +888,10 @@ void LeakReport::ApplySuppressions() {
atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
leaks_[i].hit_count);
leaks_[i].is_suppressed = true;
++new_suppressions;
}
}
return new_suppressions;
}
uptr LeakReport::UnsuppressedLeakCount() {
@ -841,6 +901,14 @@ uptr LeakReport::UnsuppressedLeakCount() {
return result;
}
// Returns how many leaks in the report are indirect and not yet suppressed.
uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr count = 0;
  for (uptr idx = 0; idx < leaks_.size(); ++idx) {
    const auto &leak = leaks_[idx];
    if (!leak.is_suppressed && !leak.is_directly_leaked)
      ++count;
  }
  return count;
}
} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {

View File

@ -103,8 +103,9 @@ class LeakReport {
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
void ApplySuppressions();
uptr ApplySuppressions();
uptr UnsuppressedLeakCount();
uptr IndirectUnsuppressedLeakCount();
private:
void PrintReportForLeak(uptr index);

View File

@ -17,15 +17,14 @@
#include <stdio.h>
#include <stdlib.h>
// Deliberately leaks a 666-byte block and returns it; the suppression file
// matches this function by name. Prints the address so the test log can be
// correlated with the leak report.
void *LSanTestLeakingFunc() {
  void *p = malloc(666);
  fprintf(stderr, "Test alloc: %p.\n", p);
  return p;
}
// Stores a second leaked block (777 bytes) inside the suppressed block from
// LSanTestLeakingFunc. After this patch the inner block is an indirect leak
// of a suppressed block, so it must be suppressed too.
void LSanTestUnsuppressedLeakingFunc() {
  void **p = (void **)LSanTestLeakingFunc();
  *p = malloc(777);
  fprintf(stderr, "Test alloc: %p.\n", *p);
}
@ -38,6 +37,6 @@ int main() {
}
// CHECK: Suppressions used:
// CHECK: 1 666 *LSanTestLeakingFunc*
// CHECK: SUMMARY: {{(Leak|Address)}}Sanitizer: 2114 byte(s) leaked in 2 allocation(s)
// CHECK: SUMMARY: {{(Leak|Address)}}Sanitizer: 1337 byte(s) leaked in 1 allocation(s)
// NOSUPP: SUMMARY: {{(Leak|Address)}}Sanitizer: 2780 byte(s) leaked in 3 allocation(s).