[sanitizer] Remove StackDepotReverseMap

Now StackDepotGet can retrieve the stack trace in O(1).

Depends on D111612.

Reviewed By: dvyukov

Differential Revision: https://reviews.llvm.org/D111613
Vitaly Buka 2021-10-11 22:18:00 -07:00
parent ce7f8c8474
commit ca0036df7d
6 changed files with 7 additions and 100 deletions
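
For context, a minimal sketch of the lookup pattern this change relies on (not part of the commit; the helper below is illustrative only). StackDepotPut dedups and stores a trace and returns a 32-bit id; StackDepotGet now resolves that id directly, so no StackDepotReverseMap snapshot has to be built first:

    #include "sanitizer_common/sanitizer_stackdepot.h"

    namespace __sanitizer {

    // Illustrative only: store a trace, keep just the id, resolve it later.
    static StackTrace StoreAndResolve(const uptr *pcs, uptr n) {
      u32 id = StackDepotPut(StackTrace(pcs, n));  // dedup + store, returns id
      return StackDepotGet(id);                    // direct, O(1) lookup
    }

    }  // namespace __sanitizer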

View File

@@ -487,7 +487,6 @@ static uptr GetCallerPC(const StackTrace &stack) {
 struct InvalidPCParam {
   Frontier *frontier;
-  const StackDepotReverseMap *stack_depot;
   bool skip_linker_allocations;
 };
@@ -502,7 +501,7 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
   u32 stack_id = m.stack_trace_id();
   uptr caller_pc = 0;
   if (stack_id > 0)
-    caller_pc = GetCallerPC(param->stack_depot->Get(stack_id));
+    caller_pc = GetCallerPC(StackDepotGet(stack_id));
   // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
   // it as reachable, as we can't properly report its allocation stack anyway.
   if (caller_pc == 0 || (param->skip_linker_allocations &&
@@ -533,11 +532,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
 // which we don't care about).
 // On all other platforms, this simply checks to ensure that the caller pc is
 // valid before reporting chunks as leaked.
-static void ProcessPC(Frontier *frontier,
-                      const StackDepotReverseMap &stack_depot) {
+static void ProcessPC(Frontier *frontier) {
   InvalidPCParam arg;
   arg.frontier = frontier;
-  arg.stack_depot = &stack_depot;
   arg.skip_linker_allocations =
       flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
   ForEachChunk(MarkInvalidPCCb, &arg);
@@ -545,7 +542,6 @@ static void ProcessPC(Frontier *frontier,
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
-                              const StackDepotReverseMap &stack_depot,
                               Frontier *frontier) {
   const InternalMmapVector<u32> &suppressed_stacks =
       GetSuppressionContext()->GetSortedSuppressedStacks();
@@ -560,7 +556,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
   FloodFillTag(frontier, kReachable);
   CHECK_EQ(0, frontier->size());
-  ProcessPC(frontier, stack_depot);
+  ProcessPC(frontier);
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
@@ -654,8 +650,7 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(param);
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
-  ClassifyAllChunks(suspended_threads, param->leak_report.stack_depot(),
-                    &param->frontier);
+  ClassifyAllChunks(suspended_threads, &param->frontier);
   ForEachChunk(CollectLeaksCb, &param->leak_report);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
@@ -795,7 +790,7 @@ void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
   CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
   if (u32 resolution = flags()->resolution) {
-    StackTrace stack = stack_depot_.Get(stack_trace_id);
+    StackTrace stack = StackDepotGet(stack_trace_id);
     stack.size = Min(stack.size, resolution);
     stack_trace_id = StackDepotPut(stack);
   }
@@ -863,7 +858,7 @@ void LeakReport::PrintReportForLeak(uptr index) {
   Printf("%s", d.Default());
   CHECK(leaks_[index].stack_trace_id);
-  stack_depot_.Get(leaks_[index].stack_trace_id).Print();
+  StackDepotGet(leaks_[index].stack_trace_id).Print();
   if (flags()->report_objects) {
     Printf("Objects leaked above:\n");
@@ -900,7 +895,7 @@ uptr LeakReport::ApplySuppressions() {
   uptr new_suppressions = false;
   for (uptr i = 0; i < leaks_.size(); i++) {
     Suppression *s = suppressions->GetSuppressionForStack(
-        leaks_[i].stack_trace_id, stack_depot_.Get(leaks_[i].stack_trace_id));
+        leaks_[i].stack_trace_id, StackDepotGet(leaks_[i].stack_trace_id));
     if (s) {
       s->weight += leaks_[i].total_size;
       atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +

View File

@@ -108,14 +108,12 @@ class LeakReport {
   uptr ApplySuppressions();
   uptr UnsuppressedLeakCount();
   uptr IndirectUnsuppressedLeakCount();
-  const StackDepotReverseMap &stack_depot() { return stack_depot_; }
  private:
   void PrintReportForLeak(uptr index);
   void PrintLeakedObjectsForLeak(uptr index);
   u32 next_id_ = 0;
-  StackDepotReverseMap stack_depot_;
   InternalMmapVector<Leak> leaks_;
   InternalMmapVector<LeakedObject> leaked_objects_;
 };

View File

@@ -116,38 +116,4 @@ StackDepotHandle StackDepotNode::get_handle(u32 id) {
   return StackDepotHandle(&theDepot.nodes[id], id);
 }
-bool StackDepotReverseMap::IdDescPair::IdComparator(
-    const StackDepotReverseMap::IdDescPair &a,
-    const StackDepotReverseMap::IdDescPair &b) {
-  return a.id < b.id;
-}
-void StackDepotReverseMap::Init() const {
-  if (LIKELY(map_.capacity()))
-    return;
-  map_.reserve(StackDepotGetStats().n_uniq_ids + 100);
-  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
-    u32 s = atomic_load(&theDepot.tab[idx], memory_order_consume) &
-            StackDepot::kUnlockMask;
-    for (; s;) {
-      const StackDepotNode &node = theDepot.nodes[s];
-      IdDescPair pair = {s, &node};
-      map_.push_back(pair);
-      s = node.link;
-    }
-  }
-  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
-}
-StackTrace StackDepotReverseMap::Get(u32 id) const {
-  Init();
-  if (!map_.size())
-    return StackTrace();
-  IdDescPair pair = {id, nullptr};
-  uptr idx = InternalLowerBound(map_, pair, IdDescPair::IdComparator);
-  if (idx > map_.size() || map_[idx].id != id)
-    return StackTrace();
-  return map_[idx].desc->load();
-}
 } // namespace __sanitizer
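
For readers unfamiliar with the removed mechanism above: Init walked every hash bucket of the depot, collected (id, node) pairs, and sorted them so Get could binary-search, i.e. an O(n log n) build plus O(log n) lookups, all of which becomes unnecessary once StackDepotGet itself is O(1). A standalone sketch of that pattern using the standard library (illustrative only, not sanitizer code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct IdDesc {
      uint32_t id;
      const void *desc;
    };

    // Mirrors StackDepotReverseMap::Init: sort the collected pairs by id once...
    void BuildIndex(std::vector<IdDesc> &map) {
      std::sort(map.begin(), map.end(),
                [](const IdDesc &a, const IdDesc &b) { return a.id < b.id; });
    }

    // ...so that, as in StackDepotReverseMap::Get, lookups can binary-search:
    // O(log n) per query, versus the O(1) indexing StackDepotGet now provides.
    const void *Lookup(const std::vector<IdDesc> &map, uint32_t id) {
      auto it = std::lower_bound(
          map.begin(), map.end(), IdDesc{id, nullptr},
          [](const IdDesc &a, const IdDesc &b) { return a.id < b.id; });
      return (it != map.end() && it->id == id) ? it->desc : nullptr;
    }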

View File

@@ -43,32 +43,6 @@ void StackDepotLockAll();
 void StackDepotUnlockAll();
 void StackDepotPrintAll();
-// Instantiating this class creates a snapshot of StackDepot which can be
-// efficiently queried with StackDepotGet(). You can use it concurrently with
-// StackDepot, but the snapshot is only guaranteed to contain those stack traces
-// which were stored before it was instantiated.
-class StackDepotReverseMap {
- public:
-  StackDepotReverseMap() = default;
-  StackTrace Get(u32 id) const;
- private:
-  struct IdDescPair {
-    u32 id;
-    const StackDepotNode *desc;
-    static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
-  };
-  void Init() const;
-  mutable InternalMmapVector<IdDescPair> map_;
-  // Disallow evil constructors.
-  StackDepotReverseMap(const StackDepotReverseMap&);
-  void operator=(const StackDepotReverseMap&);
-};
 } // namespace __sanitizer
 #endif // SANITIZER_STACKDEPOT_H

View File

@@ -58,7 +58,6 @@ class StackDepotBase {
  private:
   friend Node;
-  friend class StackDepotReverseMap;
   u32 find(u32 s, args_type args, hash_type hash) const;
   static u32 lock(atomic_uint32_t *p);
   static void unlock(atomic_uint32_t *p, u32 s);

View File

@@ -106,29 +106,4 @@ TEST(SanitizerCommon, StackDepotPrintNoLock) {
   }
 }
-TEST(SanitizerCommon, StackDepotReverseMap) {
-  uptr array1[] = {1, 2, 3, 4, 5};
-  uptr array2[] = {7, 1, 3, 0};
-  uptr array3[] = {10, 2, 5, 3};
-  uptr array4[] = {1, 3, 2, 5};
-  u32 ids[4] = {0};
-  StackTrace s1(array1, ARRAY_SIZE(array1));
-  StackTrace s2(array2, ARRAY_SIZE(array2));
-  StackTrace s3(array3, ARRAY_SIZE(array3));
-  StackTrace s4(array4, ARRAY_SIZE(array4));
-  ids[0] = StackDepotPut(s1);
-  ids[1] = StackDepotPut(s2);
-  ids[2] = StackDepotPut(s3);
-  ids[3] = StackDepotPut(s4);
-  StackDepotReverseMap map;
-  for (uptr i = 0; i < 4; i++) {
-    StackTrace stack = StackDepotGet(ids[i]);
-    StackTrace from_map = map.Get(ids[i]);
-    EXPECT_EQ(stack.size, from_map.size);
-    EXPECT_EQ(stack.trace, from_map.trace);
-  }
-}
 } // namespace __sanitizer
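
With the reverse-map test removed, an equivalent round-trip check against the depot itself would look roughly like this (a sketch, not part of this commit; the test name is made up, and it assumes the same gtest helpers used in the removed test above):

    TEST(SanitizerCommon, StackDepotGetRoundTrip) {
      uptr array[] = {1, 2, 3, 4, 5};
      StackTrace s(array, ARRAY_SIZE(array));
      u32 id = StackDepotPut(s);
      StackTrace from_depot = StackDepotGet(id);
      // The depot stores its own copy, so compare contents rather than the
      // trace pointers (which the removed test could compare because both
      // sides pointed into the depot).
      EXPECT_EQ(s.size, from_depot.size);
      for (uptr i = 0; i < from_depot.size; i++)
        EXPECT_EQ(s.trace[i], from_depot.trace[i]);
    }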