[NFC][lsan] Clang-format lsan_common.cpp

This commit is contained in:
Vitaly Buka 2021-12-07 21:34:06 -08:00
parent e587372f85
commit b79ea567cf
1 changed file with 110 additions and 92 deletions

View File

@ -34,7 +34,6 @@ Mutex global_mutex;
Flags lsan_flags;
void DisableCounterUnderflow() {
if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
@ -43,27 +42,29 @@ void DisableCounterUnderflow() {
}
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
#define LOG_POINTERS(...) \
do { \
if (flags()->log_pointers) Report(__VA_ARGS__); \
} while (0)
# define LOG_POINTERS(...) \
do { \
if (flags()->log_pointers) \
Report(__VA_ARGS__); \
} while (0)
#define LOG_THREADS(...) \
do { \
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0)
# define LOG_THREADS(...) \
do { \
if (flags()->log_threads) \
Report(__VA_ARGS__); \
} while (0)
class LeakSuppressionContext {
bool parsed = false;
@ -95,17 +96,17 @@ class LeakSuppressionContext {
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// definition.
"leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_MAC
// For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
"leak:*_os_trace*\n"
#endif
# endif
// TLS leak in some glibc versions, described in
// https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
"leak:*tls_get_addr*\n";
@ -146,9 +147,9 @@ void InitCommonLsan() {
}
}
class Decorator: public __sanitizer::SanitizerCommonDecorator {
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
Decorator() : SanitizerCommonDecorator() {}
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
};
@ -157,19 +158,19 @@ static inline bool CanBeAHeapPointer(uptr p) {
// Since our heap is located in mmap-ed memory, we can assume a sensible lower
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
if (p < kMinAddress) return false;
#if defined(__x86_64__)
if (p < kMinAddress)
return false;
# if defined(__x86_64__)
// Accept only canonical form user-space addresses.
return ((p >> 47) == 0);
#elif defined(__mips64)
# elif defined(__mips64)
return ((p >> 40) == 0);
#elif defined(__aarch64__)
unsigned runtimeVMA =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
# elif defined(__aarch64__)
unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
return ((p >> runtimeVMA) == 0);
#else
# else
return true;
#endif
# endif
}
// Scans the memory range, looking for byte patterns that point into allocator
@ -178,8 +179,7 @@ static inline bool CanBeAHeapPointer(uptr p) {
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
const char *region_type, ChunkTag tag) {
CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment();
@ -190,13 +190,17 @@ void ScanRangeForPointers(uptr begin, uptr end,
pp = pp + alignment - pp % alignment;
for (; pp + sizeof(void *) <= end; pp += alignment) {
void *p = *reinterpret_cast<void **>(pp);
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
continue;
uptr chunk = PointsIntoChunk(p);
if (!chunk) continue;
if (!chunk)
continue;
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
if (chunk == begin) continue;
if (chunk == begin)
continue;
LsanMetadata m(chunk);
if (m.tag() == kReachable || m.tag() == kIgnored) continue;
if (m.tag() == kReachable || m.tag() == kIgnored)
continue;
// Do this check relatively late so we can log only the interesting cases.
if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
@ -234,23 +238,23 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
}
}
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
#if SANITIZER_FUCHSIA
# if SANITIZER_FUCHSIA
// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
#else
# else
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#endif
# endif
static void ProcessThreadRegistry(Frontier *frontier) {
InternalMmapVector<uptr> ptrs;
@ -282,9 +286,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
LOG_THREADS("Processing thread %llu.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
&cache_begin, &cache_end, &dtls);
bool thread_found =
GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
&tls_end, &cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
@ -298,7 +302,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Report("Unable to get registers from thread %llu.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
// GetRegistersAndSP failed with ESRCH.
if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
continue;
sp = stack_begin;
}
@ -353,7 +358,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
void *arg) -> void {
ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
@ -366,7 +371,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
// thread is suspended in the middle of updating its DTLS. IOWs, we
// could scan already freed memory. (probably fine for now)
__libc_iterate_dynamic_tls(os_id, cb, frontier);
#else
# else
if (dtls && !DTLSInDestruction(dtls)) {
ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
uptr dtls_beg = dtv.beg;
@ -383,7 +388,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
// this and continue.
LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
}
#endif
# endif
}
}
@ -391,13 +396,14 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
ProcessThreadRegistry(frontier);
}
#endif // SANITIZER_FUCHSIA
# endif // SANITIZER_FUCHSIA
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
uptr region_begin, uptr region_end, bool is_readable) {
uptr intersection_begin = Max(root_region.begin, region_begin);
uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
if (intersection_begin >= intersection_end) return;
if (intersection_begin >= intersection_end)
return;
LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
(void *)root_region.begin,
(void *)(root_region.begin + root_region.size),
@ -420,7 +426,8 @@ static void ProcessRootRegion(Frontier *frontier,
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
if (!flags()->use_root_regions) return;
if (!flags()->use_root_regions)
return;
for (uptr i = 0; i < root_regions.size(); i++)
ProcessRootRegion(frontier, root_regions[i]);
}
@ -586,7 +593,8 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (!m.allocated())
return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
m.tag());
@ -622,13 +630,13 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
}
}
#if SANITIZER_FUCHSIA
# if SANITIZER_FUCHSIA
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
#else // !SANITIZER_FUCHSIA
# else // !SANITIZER_FUCHSIA
static void ReportUnsuspendedThreads(
const SuspendedThreadsList &suspended_threads) {
@ -642,7 +650,7 @@ static void ReportUnsuspendedThreads(
&ReportIfNotSuspended, &threads);
}
#endif // !SANITIZER_FUCHSIA
# endif // !SANITIZER_FUCHSIA
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
void *arg) {
@ -726,10 +734,12 @@ bool HasReportedLeaks() { return has_reported_leaks; }
void DoLeakCheck() {
Lock l(&global_mutex);
static bool already_done;
if (already_done) return;
if (already_done)
return;
already_done = true;
has_reported_leaks = CheckForLeaks();
if (has_reported_leaks) HandleLeaks();
if (has_reported_leaks)
HandleLeaks();
}
static int DoRecoverableLeakCheck() {
@ -806,9 +816,10 @@ void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
}
}
if (i == leaks_.size()) {
if (leaks_.size() == kMaxLeaksConsidered) return;
Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
is_directly_leaked, /* is_suppressed */ false };
if (leaks_.size() == kMaxLeaksConsidered)
return;
Leak leak = {next_id_++, /* hit_count */ 1, leaked_size,
stack_trace_id, is_directly_leaked, /* is_suppressed */ false};
leaks_.push_back(leak);
}
if (flags()->report_objects) {
@ -828,9 +839,10 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
Printf("Too many leaks! Only the first %zu leaks encountered will be "
"reported.\n",
kMaxLeaksConsidered);
Printf(
"Too many leaks! Only the first %zu leaks encountered will be "
"reported.\n",
kMaxLeaksConsidered);
uptr unsuppressed_count = UnsuppressedLeakCount();
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
@ -838,10 +850,12 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
Sort(leaks_.data(), leaks_.size(), &LeakComparator);
uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
if (leaks_[i].is_suppressed)
continue;
PrintReportForLeak(i);
leaks_reported++;
if (leaks_reported == num_leaks_to_report) break;
if (leaks_reported == num_leaks_to_report)
break;
}
if (leaks_reported < unsuppressed_count) {
uptr remaining = unsuppressed_count - leaks_reported;
@ -880,9 +894,10 @@ void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
if (leaks_[i].is_suppressed)
continue;
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
InternalScopedString summary;
summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
@ -899,7 +914,7 @@ uptr LeakReport::ApplySuppressions() {
if (s) {
s->weight += leaks_[i].total_size;
atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
leaks_[i].hit_count);
leaks_[i].hit_count);
leaks_[i].is_suppressed = true;
++new_suppressions;
}
@ -910,7 +925,8 @@ uptr LeakReport::ApplySuppressions() {
uptr LeakReport::UnsuppressedLeakCount() {
uptr result = 0;
for (uptr i = 0; i < leaks_.size(); i++)
if (!leaks_[i].is_suppressed) result++;
if (!leaks_[i].is_suppressed)
result++;
return result;
}
@ -922,16 +938,16 @@ uptr LeakReport::IndirectUnsuppressedLeakCount() {
return result;
}
} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
using namespace __lsan;
@ -948,11 +964,13 @@ void __lsan_ignore_object(const void *p) {
if (res == kIgnoreObjectInvalid)
VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
if (res == kIgnoreObjectAlreadyIgnored)
VReport(1, "__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
VReport(1,
"__lsan_ignore_object(): "
"heap object at %p is already being ignored\n",
p);
if (res == kIgnoreObjectSuccess)
VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@ -962,7 +980,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
RootRegion region = {reinterpret_cast<uptr>(begin), size};
root_regions.push_back(region);
VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@ -988,7 +1006,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
begin, size);
Die();
}
#endif // CAN_SANITIZE_LEAKS
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@ -1010,7 +1028,7 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
__lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@ -1018,7 +1036,7 @@ int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
#endif // CAN_SANITIZE_LEAKS
return 0;
}
@ -1027,14 +1045,14 @@ SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__lsan_is_turned_off() {
return 0;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
__lsan_default_suppressions() {
return "";
}
#endif
} // extern "C"
} // extern "C"