From 4e0215a71cbc2af0796e4f02415739247c8fa28b Mon Sep 17 00:00:00 2001
From: Sergey Matveev
Date: Mon, 24 Jun 2013 08:34:50 +0000
Subject: [PATCH] Revert to C-style callbacks for iteration over allocator chunks.

Also clean up LSan code, fix some comments and replace void* with uptr
to bring down the number of reinterpret_casts.

llvm-svn: 184700
---
 compiler-rt/lib/asan/asan_allocator2.cc       | 29 ++-----
 .../TestCases/disabler_in_tsd_destructor.cc   |  2 +-
 compiler-rt/lib/lsan/lsan_allocator.cc        | 39 ++++-----
 compiler-rt/lib/lsan/lsan_common.cc           | 87 ++++++++++---------
 compiler-rt/lib/lsan/lsan_common.h            | 68 +++------------
 compiler-rt/lib/lsan/lsan_common_linux.cc     | 22 ++---
 .../sanitizer_common/sanitizer_allocator.h    | 61 ++++++-------
 .../tests/sanitizer_allocator_test.cc         | 27 +++---
 8 files changed, 129 insertions(+), 206 deletions(-)

diff --git a/compiler-rt/lib/asan/asan_allocator2.cc b/compiler-rt/lib/asan/asan_allocator2.cc
index 6096998a6a7c..63d6adaece31 100644
--- a/compiler-rt/lib/asan/asan_allocator2.cc
+++ b/compiler-rt/lib/asan/asan_allocator2.cc
@@ -718,26 +718,25 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
   *end = *begin + sizeof(__asan::allocator);
 }
 
-void *PointsIntoChunk(void* p) {
+uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
   if (!m) return 0;
   uptr chunk = m->Beg();
   if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
-    return reinterpret_cast<void *>(chunk);
+    return chunk;
   return 0;
 }
 
-void *GetUserBegin(void *p) {
+uptr GetUserBegin(uptr chunk) {
   __asan::AsanChunk *m =
-      __asan::GetAsanChunkByAddrFastLocked(reinterpret_cast<uptr>(p));
+      __asan::GetAsanChunkByAddrFastLocked(chunk);
   CHECK(m);
-  return reinterpret_cast<void *>(m->Beg());
+  return m->Beg();
 }
 
-LsanMetadata::LsanMetadata(void *chunk) {
-  uptr addr = reinterpret_cast<uptr>(chunk);
-  metadata_ = reinterpret_cast<void *>(addr - __asan::kChunkHeaderSize);
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
 }
 
 bool LsanMetadata::allocated() const {
@@ -765,19 +764,9 @@ u32 LsanMetadata::stack_trace_id() const {
   return m->alloc_context_id;
 }
 
-template <typename Callable> void ForEachChunk(Callable const &callback) {
-  __asan::allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __asan::allocator.ForEachChunk(callback, arg);
 }
-#if CAN_SANITIZE_LEAKS
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
-    ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
-    MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
-    CollectIgnoredCb const &callback);
-#endif  // CAN_SANITIZE_LEAKS
 
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   uptr addr = reinterpret_cast<uptr>(p);
diff --git a/compiler-rt/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc b/compiler-rt/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
index 2081c6134d81..94e4fc390b3b 100644
--- a/compiler-rt/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
+++ b/compiler-rt/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
@@ -12,7 +12,7 @@
 
 pthread_key_t key;
 
-void key_destructor(void *) {
+void key_destructor(void *arg) {
   __lsan::ScopedDisabler d;
   void *p = malloc(1337);
   // Break optimization.
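The core of the change is easiest to see outside the diff: ForEachChunk() now takes a plain function pointer plus an opaque void *arg instead of a templated callable, and per-call state that the old functor classes kept in member fields now travels through arg. The following is a minimal, self-contained sketch of that pattern, not code from the patch; only the ForEachChunkCallback typedef matches the real interface, and every other name here is illustrative.

    #include <vector>

    typedef unsigned long uptr;  // stand-in for sanitizer_common's uptr

    // Callback type introduced by this patch.
    typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

    // Hypothetical chunk registry standing in for the real allocator.
    static std::vector<uptr> g_chunks;

    // Iteration primitive: invokes the callback on every chunk and threads
    // the caller's state through the opaque 'arg' pointer.
    static void ForEachChunk(ForEachChunkCallback callback, void *arg) {
      for (uptr i = 0; i < g_chunks.size(); i++)
        callback(g_chunks[i], arg);
    }

    // C-style callback: state that a functor would hold in a member field
    // arrives via 'arg' instead.
    static void CollectChunksCb(uptr chunk, void *arg) {
      static_cast<std::vector<uptr> *>(arg)->push_back(chunk);
    }

    void Example() {
      std::vector<uptr> collected;
      // No explicit template instantiation is needed per callback type.
      ForEachChunk(CollectChunksCb, &collected);
    }

Because the callback is an ordinary function pointer, ForEachChunk can be declared once in lsan_common.h and defined out of line in each tool, which is what lets the patch delete the per-callback explicit template instantiations in asan_allocator2.cc and lsan_allocator.cc.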
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cc b/compiler-rt/lib/lsan/lsan_allocator.cc
index 2bdb4a2b005f..1512c2e85f25 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cc
+++ b/compiler-rt/lib/lsan/lsan_allocator.cc
@@ -52,7 +52,7 @@ void AllocatorThreadFinish() {
 }
 
 static ChunkMetadata *Metadata(void *p) {
-  return (ChunkMetadata *)allocator.GetMetaData(p);
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
 }
 
 static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
@@ -62,14 +62,14 @@
   m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
   m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
   m->requested_size = size;
-  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
 }
 
 static void RegisterDeallocation(void *p) {
   if (!p) return;
   ChunkMetadata *m = Metadata(p);
   CHECK(m);
-  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
 }
 
 void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
@@ -129,25 +129,26 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
   *end = *begin + sizeof(allocator);
 }
 
-void *PointsIntoChunk(void* p) {
-  void *chunk = allocator.GetBlockBeginFastLocked(p);
+uptr PointsIntoChunk(void* p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
   if (!chunk) return 0;
   // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
   // valid, but we don't want that.
-  if (p < chunk) return 0;
-  ChunkMetadata *m = Metadata(chunk);
+  if (addr < chunk) return 0;
+  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
   CHECK(m);
-  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
+  if (m->allocated && addr < chunk + m->requested_size)
     return chunk;
   return 0;
 }
 
-void *GetUserBegin(void *p) {
-  return p;
+uptr GetUserBegin(uptr chunk) {
+  return chunk;
 }
 
-LsanMetadata::LsanMetadata(void *chunk) {
-  metadata_ = Metadata(chunk);
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
   CHECK(metadata_);
 }
 
@@ -171,20 +172,10 @@ u32 LsanMetadata::stack_trace_id() const {
   return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
 }
 
-template <typename Callable>
-void ForEachChunk(Callable const &callback) {
-  allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  allocator.ForEachChunk(callback, arg);
 }
 
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
-    ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
-    MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
-    CollectIgnoredCb const &callback);
-
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   void *chunk = allocator.GetBlockBegin(p);
   if (!chunk || p < chunk) return kIgnoreObjectInvalid;
diff --git a/compiler-rt/lib/lsan/lsan_common.cc b/compiler-rt/lib/lsan/lsan_common.cc
index ed1adb23a6bd..5e936cdf4768 100644
--- a/compiler-rt/lib/lsan/lsan_common.cc
+++ b/compiler-rt/lib/lsan/lsan_common.cc
@@ -23,7 +23,7 @@
 #if CAN_SANITIZE_LEAKS
 namespace __lsan {
 
-// This mutex is used to prevent races between DoLeakCheck and SuppressObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
 BlockingMutex global_mutex(LINKER_INITIALIZED);
 
 THREADLOCAL int disable_counter;
@@ -84,12 +84,12 @@ static inline bool CanBeAHeapPointer(uptr p) {
 #endif
 }
 
-// Scan the memory range, looking for byte patterns that point into allocator
-// chunks. Mark those chunks with tag and add them to the frontier.
-// There are two usage modes for this function: finding reachable or ignored
-// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
-// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
-// so frontier = 0.
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable or ignored
+// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
 void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag) {
@@ -99,10 +99,10 @@
   uptr pp = begin;
   if (pp % alignment)
     pp = pp + alignment - pp % alignment;
-  for (; pp + sizeof(void *) <= end; pp += alignment) {
+  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
     void *p = *reinterpret_cast<void **>(pp);
     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
-    void *chunk = PointsIntoChunk(p);
+    uptr chunk = PointsIntoChunk(p);
     if (!chunk) continue;
     LsanMetadata m(chunk);
     // Reachable beats ignored beats leaked.
@@ -111,14 +111,13 @@
       m.set_tag(tag);
     if (flags()->log_pointers)
       Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
-             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
-             m.requested_size());
+             chunk, chunk + m.requested_size(), m.requested_size());
     if (frontier)
-      frontier->push_back(reinterpret_cast<uptr>(chunk));
+      frontier->push_back(chunk);
   }
 }
 
-// Scan thread data (stacks and TLS) for heap pointers.
+// Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                            Frontier *frontier) {
   InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
@@ -191,31 +190,34 @@ static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
   while (frontier->size()) {
     uptr next_chunk = frontier->back();
     frontier->pop_back();
-    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
+    LsanMetadata m(next_chunk);
     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(),
                          frontier, "HEAP", tag);
   }
 }
 
-// Mark leaked chunks which are reachable from other leaked chunks.
-void MarkIndirectlyLeakedCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
-    ScanRangeForPointers(reinterpret_cast<uptr>(p),
-                         reinterpret_cast<uptr>(p) + m.requested_size(),
+    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                          /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
 }
 
-void CollectIgnoredCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() == kIgnored)
-    frontier_->push_back(reinterpret_cast<uptr>(p));
+    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
 }
 
-// Set the appropriate tag on each chunk.
+// Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
   Frontier frontier(GetPageSizeCached());
@@ -233,14 +235,14 @@
   if (flags()->log_pointers)
     Report("Scanning ignored chunks.\n");
   CHECK_EQ(0, frontier.size());
-  ForEachChunk(CollectIgnoredCb(&frontier));
+  ForEachChunk(CollectIgnoredCb, &frontier);
   FloodFillTag(&frontier, kIgnored);
 
   // Iterate over leaked chunks and mark those that are reachable from other
   // leaked chunks.
   if (flags()->log_pointers)
     Report("Scanning leaked chunks.\n");
-  ForEachChunk(MarkIndirectlyLeakedCb());
+  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
 }
 
 static void PrintStackTraceById(u32 stack_trace_id) {
@@ -251,9 +253,12 @@
                          common_flags()->strip_path_prefix, 0);
 }
 
-void CollectLeaksCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     uptr resolution = flags()->resolution;
@@ -261,33 +266,29 @@
       uptr size = 0;
       const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
       size = Min(size, resolution);
-      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
-                        m.tag());
+      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
     } else {
-      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
     }
   }
 }
 
-static void CollectLeaks(LeakReport *leak_report) {
-  ForEachChunk(CollectLeaksCb(leak_report));
-}
-
-void PrintLeakedCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunkCallback. Prints addresses of unreachable chunks.
+static void PrintLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     Printf("%s leaked %zu byte object at %p.\n",
            m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
-           m.requested_size(), p);
+           m.requested_size(), chunk);
   }
 }
 
 static void PrintLeaked() {
   Printf("\n");
   Printf("Reporting individual objects:\n");
-  ForEachChunk(PrintLeakedCb());
+  ForEachChunk(PrintLeakedCb, 0 /* arg */);
 }
 
 struct DoLeakCheckParam {
@@ -302,7 +303,7 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(!param->success);
   CHECK(param->leak_report.IsEmpty());
   ClassifyAllChunks(suspended_threads);
-  CollectLeaks(&param->leak_report);
+  ForEachChunk(CollectLeaksCb, &param->leak_report);
   if (!param->leak_report.IsEmpty() && flags()->report_objects)
     PrintLeaked();
   param->success = true;
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index 0d90acf065c0..e5f0a22d51b7 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -15,6 +15,7 @@
 #ifndef LSAN_COMMON_H
 #define LSAN_COMMON_H
 
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_platform.h"
@@ -105,55 +106,6 @@ void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag);
 
-// Callables for iterating over chunks. Those classes are used as template
-// parameters in ForEachChunk, so we must expose them here to allow for explicit
-// template instantiation.
-
-// Identifies unreachable chunks which must be treated as reachable. Marks them
-// as reachable and adds them to the frontier.
-class ProcessPlatformSpecificAllocationsCb {
- public:
-  explicit ProcessPlatformSpecificAllocationsCb(
-      Frontier *frontier)
-      : frontier_(frontier) {}
-  void operator()(void *p) const;
- private:
-  Frontier *frontier_;
-};
-
-// Prints addresses of unreachable chunks.
-class PrintLeakedCb {
- public:
-  void operator()(void *p) const;
-};
-
-// Aggregates unreachable chunks into a LeakReport.
-class CollectLeaksCb {
- public:
-  explicit CollectLeaksCb(LeakReport *leak_report)
-      : leak_report_(leak_report) {}
-  void operator()(void *p) const;
- private:
-  LeakReport *leak_report_;
-};
-
-// Scans each leaked chunk for pointers to other leaked chunks, and marks each
-// of them as indirectly leaked.
-class MarkIndirectlyLeakedCb {
- public:
-  void operator()(void *p) const;
-};
-
-// Finds all chunk marked as kIgnored and adds their addresses to frontier.
-class CollectIgnoredCb {
- public:
-  explicit CollectIgnoredCb(Frontier *frontier)
-      : frontier_(frontier) {}
-  void operator()(void *p) const;
- private:
-  Frontier *frontier_;
-};
-
 enum IgnoreObjectResult {
   kIgnoreObjectSuccess,
   kIgnoreObjectAlreadyIgnored,
@@ -167,8 +119,8 @@ bool DisabledInThisThread();
 
 // The following must be implemented in the parent tool.
 
-template <typename Callable> void ForEachChunk(Callable const &callback);
-// The address range occupied by the global allocator object.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+// Returns the address range occupied by the global allocator object.
 void GetAllocatorGlobalRange(uptr *begin, uptr *end);
 // Wrappers for allocator's ForceLock()/ForceUnlock().
 void LockAllocator();
@@ -179,18 +131,18 @@ void UnlockThreadRegistry();
 bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end,
                            uptr *cache_begin, uptr *cache_end);
-// If p points into a chunk that has been allocated to the user, return its
-// user-visible address. Otherwise, return 0.
-void *PointsIntoChunk(void *p);
-// Return address of user-visible chunk contained in this allocator chunk.
-void *GetUserBegin(void *p);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
 // Helper for __lsan_ignore_object().
 IgnoreObjectResult IgnoreObjectLocked(const void *p);
 // Wrapper for chunk metadata operations.
 class LsanMetadata {
  public:
-  // Constructor accepts pointer to user-visible chunk.
-  explicit LsanMetadata(void *chunk);
+  // Constructor accepts address of user-visible chunk.
+  explicit LsanMetadata(uptr chunk);
   bool allocated() const;
   ChunkTag tag() const;
   void set_tag(ChunkTag value);
diff --git a/compiler-rt/lib/lsan/lsan_common_linux.cc b/compiler-rt/lib/lsan/lsan_common_linux.cc
index 3ce0ea4fadb0..08a05958acfa 100644
--- a/compiler-rt/lib/lsan/lsan_common_linux.cc
+++ b/compiler-rt/lib/lsan/lsan_common_linux.cc
@@ -53,8 +53,7 @@ void InitializePlatformSpecificModules() {
 
 static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                         void *data) {
-  Frontier *frontier =
-      reinterpret_cast<Frontier *>(data);
+  Frontier *frontier = reinterpret_cast<Frontier *>(data);
   for (uptr j = 0; j < info->dlpi_phnum; j++) {
     const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
     // We're looking for .data and .bss sections, which reside in writeable,
@@ -82,7 +81,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
   return 0;
 }
 
-// Scan global variables for heap pointers.
+// Scans global variables for heap pointers.
 void ProcessGlobalRegions(Frontier *frontier) {
   // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
   // deadlocking by running this under StopTheWorld. However, the lock is
@@ -101,23 +100,26 @@ static uptr GetCallerPC(u32 stack_id) {
   return 0;
 }
 
-void ProcessPlatformSpecificAllocationsCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. Identifies unreachable chunks which must be treated as
+// reachable. Marks them as reachable and adds them to the frontier.
+static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
     if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) {
       m.set_tag(kReachable);
-      frontier_->push_back(reinterpret_cast<uptr>(p));
+      reinterpret_cast<Frontier *>(arg)->push_back(chunk);
    }
  }
 }
 
-// Handle dynamically allocated TLS blocks by treating all chunks allocated from
-// ld-linux.so as reachable.
+// Handles dynamically allocated TLS blocks by treating all chunks allocated
+// from ld-linux.so as reachable.
 void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   if (!flags()->use_tls) return;
   if (!linker) return;
-  ForEachChunk(ProcessPlatformSpecificAllocationsCb(frontier));
+  ForEachChunk(ProcessPlatformSpecificAllocationsCb, frontier);
 }
 
 }  // namespace __lsan
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 6d61392f9469..a876d284d638 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -279,6 +279,9 @@ struct NoOpMapUnmapCallback {
   void OnUnmap(uptr p, uptr size) const { }
 };
 
+// Callback type for iterating over chunks.
+typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
+
 // SizeClassAllocator64 -- allocator for 64-bit address space.
 //
 // Space: a portion of address space of kSpaceSize bytes starting at
@@ -433,20 +436,18 @@ class SizeClassAllocator64 {
     }
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       RegionInfo *region = GetRegionInfo(class_id);
       uptr chunk_size = SizeClassMap::Size(class_id);
       uptr region_beg = kSpaceBeg + class_id * kRegionSize;
-      for (uptr p = region_beg;
-           p < region_beg + region->allocated_user;
-           p += chunk_size) {
-        // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
-        callback((void *)p);
+      for (uptr chunk = region_beg;
+           chunk < region_beg + region->allocated_user;
+           chunk += chunk_size) {
+        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+        callback(chunk, arg);
       }
     }
   }
@@ -726,21 +727,19 @@ class SizeClassAllocator32 {
     }
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     for (uptr region = 0; region < kNumPossibleRegions; region++)
       if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
-       for (uptr p = region_beg;
-            p < region_beg + max_chunks_in_region * chunk_size;
-            p += chunk_size) {
-         // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
-         callback((void *)p);
+       for (uptr chunk = region_beg;
+            chunk < region_beg + max_chunks_in_region * chunk_size;
+            chunk += chunk_size) {
+         // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+         callback(chunk, arg);
        }
      }
   }
@@ -1108,13 +1107,11 @@ class LargeMmapAllocator {
     mutex_.Unlock();
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     for (uptr i = 0; i < n_chunks_; i++)
-      callback(GetUser(chunks_[i]));
+      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
   }
 
  private:
@@ -1290,13 +1287,11 @@ class CombinedAllocator {
     primary_.ForceUnlock();
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
-    primary_.ForEachChunk(callback);
-    secondary_.ForEachChunk(callback);
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    primary_.ForEachChunk(callback, arg);
+    secondary_.ForEachChunk(callback, arg);
   }
 
  private:
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index c0d2e2a75862..38343d90cb36 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -635,15 +635,8 @@ TEST(Allocator, ScopedBuffer) {
   }
 }
 
-class IterationTestCallback {
- public:
-  explicit IterationTestCallback(std::set<void *> *chunks)
-      : chunks_(chunks) {}
-  void operator()(void *chunk) const {
-    chunks_->insert(chunk);
-  }
- private:
-  std::set<void *> *chunks_;
+void IterationTestCallback(uptr chunk, void *arg) {
+  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
 };
 template <class Allocator>
 void TestSizeClassAllocatorIteration() {
@@ -673,15 +666,15 @@
     }
   }
 
-  std::set<void *> reported_chunks;
-  IterationTestCallback callback(&reported_chunks);
+  std::set<uptr> reported_chunks;
   a->ForceLock();
-  a->ForEachChunk(callback);
+  a->ForEachChunk(IterationTestCallback, &reported_chunks);
   a->ForceUnlock();
 
   for (uptr i = 0; i < allocated.size(); i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
-    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
+              reported_chunks.end());
   }
 
   a->TestOnlyUnmap();
@@ -711,15 +704,15 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
   for (uptr i = 0; i < kNumAllocs; i++)
     allocated[i] = (char *)a.Allocate(&stats, size, 1);
 
-  std::set<void *> reported_chunks;
-  IterationTestCallback callback(&reported_chunks);
+  std::set<uptr> reported_chunks;
   a.ForceLock();
-  a.ForEachChunk(callback);
+  a.ForEachChunk(IterationTestCallback, &reported_chunks);
   a.ForceUnlock();
 
   for (uptr i = 0; i < kNumAllocs; i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
-    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
+              reported_chunks.end());
   }
   for (uptr i = 0; i < kNumAllocs; i++)
     a.Deallocate(&stats, allocated[i]);
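Read together with the updated tests above, the calling convention on the tool side is: lock the allocator, iterate with a C callback that stashes results through arg, unlock. Below is a compact sketch of that discipline under a placeholder allocator type rather than the real sanitizer classes; only the ForEachChunkCallback type and the lock/iterate/unlock ordering mirror the patch.

    #include <set>

    typedef unsigned long uptr;  // stand-in for sanitizer_common's uptr
    typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

    // Placeholder allocator: the real SizeClass/LargeMmap/Combined allocators
    // expose the same three entry points used here.
    struct ToyAllocator {
      void ForceLock() {}
      void ForceUnlock() {}
      void ForEachChunk(ForEachChunkCallback callback, void *arg) {
        // A real allocator walks its regions and chunk lists here.
        (void)callback;
        (void)arg;
      }
    };

    static void CollectChunksCb(uptr chunk, void *arg) {
      static_cast<std::set<uptr> *>(arg)->insert(chunk);
    }

    static void CollectAllChunks(ToyAllocator *a, std::set<uptr> *chunks) {
      a->ForceLock();  // the allocator must be locked during iteration
      a->ForEachChunk(CollectChunksCb, chunks);
      a->ForceUnlock();
    }

This mirrors the rewritten unit tests, which now pass IterationTestCallback and a std::set<uptr>* through arg instead of constructing a functor object per iteration.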