[NFC][sanitizer] Add StackDepotTestOnlyUnmap

commit 746dd6a700 (parent c0b1b52a28)
@@ -82,6 +82,7 @@ class TwoLevelMap {
       MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), MmapSize());
       UnmapOrDie(p, kSize2);
     }
+    Init();
   }
 
   uptr MemoryUsage() const {
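The Init() call added above is what makes the unmap safe to repeat: after the second-level blocks are released, the first-level pointer array still holds stale addresses until the map reinitializes itself. A minimal sketch of the pattern, not the sanitizer implementation (TwoLevelSketch, map1_ and the calloc/free backing are illustrative stand-ins for the real mmap-based code):

#include <cstddef>
#include <cstdlib>
#include <cstring>

template <typename T, size_t kSize1, size_t kSize2>
class TwoLevelSketch {
 public:
  void Init() { memset(map1_, 0, sizeof(map1_)); }
  T &operator[](size_t i) {
    T *&block = map1_[i / kSize2];
    if (!block)  // allocate the covering block on first touch
      block = static_cast<T *>(calloc(kSize2, sizeof(T)));
    return block[i % kSize2];
  }
  void TestOnlyUnmap() {
    for (size_t i = 0; i < kSize1; ++i)
      if (map1_[i]) free(map1_[i]);
    Init();  // without this, map1_ keeps dangling pointers
  }

 private:
  T *map1_[kSize1] = {};
};

int main() {
  TwoLevelSketch<int, 16, 1024> m;
  m[5000] = 42;       // allocates the block covering index 5000
  m.TestOnlyUnmap();  // frees it and resets the first level
  m[5000] = 7;        // safe again only because of the reset
}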
@@ -26,6 +26,8 @@ class PersistentAllocator {
   T *alloc(uptr count = 1);
   uptr allocated() const { return atomic_load_relaxed(&mapped_size); }
 
+  void TestOnlyUnmap();
+
  private:
   T *tryAlloc(uptr count);
   T *refillAndAlloc(uptr count);
@@ -33,6 +35,13 @@ class PersistentAllocator {
   atomic_uintptr_t region_pos;  // Region allocator for Node's.
   atomic_uintptr_t region_end;
   atomic_uintptr_t mapped_size;
+
+  struct BlockInfo {
+    const BlockInfo *next;
+    uptr ptr;
+    uptr size;
+  };
+  const BlockInfo *curr;
 };
 
 template <typename T>
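Together, the two hunks above give the allocator a test-only teardown. A hedged usage sketch (PersistentAllocator, alloc, allocated and TestOnlyUnmap are the real sanitizer-internal names; the CHECK lines are my illustration, not code from this commit):

PersistentAllocator<uptr> persistent;
uptr *p = persistent.alloc(16);       // first use maps a fresh region
p[0] = 1;
CHECK_NE(persistent.allocated(), 0);
persistent.TestOnlyUnmap();           // walks the BlockInfo list, unmaps every block
CHECK_EQ(persistent.allocated(), 0);  // the memset reset also zeroes mapped_size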
@@ -68,17 +77,34 @@ inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
     if (s)
       return s;
     atomic_store(&region_pos, 0, memory_order_relaxed);
-    uptr size = count * sizeof(T);
-    uptr allocsz = 64 * 1024;
-    if (allocsz < size)
-      allocsz = size;
+    uptr size = count * sizeof(T) + sizeof(BlockInfo);
+    uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
+    new_block->next = curr;
+    new_block->ptr = mem;
+    new_block->size = allocsz;
+    curr = new_block;
+
     atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
+
+    allocsz -= sizeof(BlockInfo);
     atomic_store(&region_end, mem + allocsz, memory_order_release);
     atomic_store(&region_pos, mem, memory_order_release);
   }
 }
 
+template <typename T>
+void PersistentAllocator<T>::TestOnlyUnmap() {
+  while (curr) {
+    uptr mem = curr->ptr;
+    uptr allocsz = curr->size;
+    curr = curr->next;
+    UnmapOrDie((void *)mem, allocsz);
+  }
+  internal_memset(this, 0, sizeof(*this));
+}
+
 } // namespace __sanitizer
 
 #endif // SANITIZER_PERSISTENT_ALLOCATOR_H
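The notable trick in refillAndAlloc is that the BlockInfo record describing a mapping is carved out of that mapping's own tail ((BlockInfo *)(mem + allocsz) - 1), so keeping the list of blocks costs no extra allocation; allocsz -= sizeof(BlockInfo) then hides the tail from the region allocator. A self-contained sketch of the same layout using plain malloc/free (every name below is illustrative, not from the commit; the real code rounds sizes to whole pages, which also keeps the tail record aligned):

#include <cassert>
#include <cstddef>
#include <cstdlib>

struct BlockInfo {
  const BlockInfo *next;
  void *ptr;
  size_t size;
};

const BlockInfo *g_blocks = nullptr;

void *AllocBlock(size_t payload) {  // payloads kept 8-byte multiples here
  size_t total = payload + sizeof(BlockInfo);
  char *mem = static_cast<char *>(malloc(total));
  // The record lives in the last sizeof(BlockInfo) bytes of the block.
  BlockInfo *info = reinterpret_cast<BlockInfo *>(mem + total) - 1;
  info->next = g_blocks;
  info->ptr = mem;
  info->size = total;
  g_blocks = info;
  return mem;  // only the first `payload` bytes are handed out
}

void FreeAllBlocks() {
  while (g_blocks) {
    void *p = g_blocks->ptr;
    g_blocks = g_blocks->next;  // read next before freeing its home
    free(p);
  }
}

int main() {
  AllocBlock(1024);
  AllocBlock(4096);
  FreeAllBlocks();
  assert(g_blocks == nullptr);
}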
@@ -36,7 +36,7 @@ struct StackDepotNode {
   bool eq(hash_type hash, const args_type &args) const {
     return hash == stack_hash;
   }
-  static uptr allocated() { return traceAllocator.allocated(); }
+  static uptr allocated();
   static hash_type hash(const args_type &args) {
     MurMur2Hash64Builder H(args.size * sizeof(uptr));
     for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
@@ -75,6 +75,10 @@ static StackDepot theDepot;
 static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
     tracePtrs;
 
+uptr StackDepotNode::allocated() {
+  return traceAllocator.allocated() + tracePtrs.MemoryUsage();
+}
+
 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
   CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
   atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
@@ -125,4 +129,10 @@ StackDepotHandle StackDepotNode::get_handle(u32 id) {
   return StackDepotHandle(&theDepot.nodes[id], id);
 }
 
+void StackDepotTestOnlyUnmap() {
+  theDepot.TestOnlyUnmap();
+  tracePtrs.TestOnlyUnmap();
+  traceAllocator.TestOnlyUnmap();
+}
+
 } // namespace __sanitizer
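With the three resets chained (depot nodes, trace pointers, trace storage), the whole depot can be torn down and rebuilt inside one process. A hedged sketch of what that enables (StackDepotPut, StackDepotGetStats and StackDepotTestOnlyUnmap are the real entry points; the CHECKs are my illustration):

uptr frames[] = {0x101, 0x102, 0x103};
StackDepotPut(StackTrace(frames, 3));
CHECK_NE(StackDepotGetStats().allocated, 0);
StackDepotTestOnlyUnmap();  // theDepot, tracePtrs and traceAllocator all reset
CHECK_EQ(StackDepotGetStats().allocated, 0);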
@@ -43,6 +43,8 @@ void StackDepotLockAll();
 void StackDepotUnlockAll();
 void StackDepotPrintAll();
 
+void StackDepotTestOnlyUnmap();
+
 } // namespace __sanitizer
 
 #endif // SANITIZER_STACKDEPOT_H
@@ -56,6 +56,11 @@ class StackDepotBase {
   void UnlockAll();
   void PrintAll();
 
+  void TestOnlyUnmap() {
+    nodes.TestOnlyUnmap();
+    internal_memset(this, 0, sizeof(*this));
+  }
+
  private:
   friend Node;
   u32 find(u32 s, args_type args, hash_type hash) const;
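Both PersistentAllocator and StackDepotBase finish their teardown with internal_memset(this, 0, sizeof(*this)), which is sound only because these are trivially-copyable aggregates whose all-zero state coincides with their freshly-constructed state. A minimal sketch of that reset idiom with the guard one could add (illustrative, not from the commit):

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <type_traits>

struct Resettable {
  char *block = nullptr;  // owned allocation, released on reset
  size_t used = 0;

  void TestOnlyReset() {
    free(block);
    memset(this, 0, sizeof(*this));  // back to the zero/initial state
  }
};
// memset would corrupt vtables or non-trivial members; require none.
static_assert(std::is_trivially_copyable<Resettable>::value,
              "memset-style reset requires a trivially copyable type");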
@@ -26,10 +26,12 @@ namespace __sanitizer {
 
 class StackDepotTest : public testing::Test {
  protected:
+  void SetUp() override { StackDepotTestOnlyUnmap(); }
   void TearDown() override {
     StackDepotStats stack_depot_stats = StackDepotGetStats();
     Printf("StackDepot: %zd ids; %zdM allocated\n",
            stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
+    StackDepotTestOnlyUnmap();
   }
 };
 
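Because SetUp() now unmaps too, every test in the fixture starts from an empty depot instead of inheriting whatever earlier tests stored. A hedged sketch of a test that relies on that (the test itself is hypothetical; the fixture and the depot calls are real):

TEST_F(StackDepotTest, HypotheticalFreshDepot) {
  // SetUp() already ran StackDepotTestOnlyUnmap(), so the count starts at zero.
  EXPECT_EQ(StackDepotGetStats().n_uniq_ids, 0u);
  uptr frames[] = {1, 2, 3};
  StackDepotPut(StackTrace(frames, 3));
  EXPECT_EQ(StackDepotGetStats().n_uniq_ids, 1u);
}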
@@ -159,43 +161,37 @@ static std::string PrintStackDepotBenchmarkParams(
 
 class StackDepotBenchmark
     : public StackDepotTest,
-      public testing::WithParamInterface<StackDepotBenchmarkParams> {
- protected:
-  void Run() {
-    auto Param = GetParam();
-    std::atomic<unsigned int> here = {};
-
-    auto thread = [&](int idx) {
-      here++;
-      while (here < Param.UniqueThreads) std::this_thread::yield();
-
-      std::vector<uptr> frames(64);
-      for (int r = 0; r < Param.RepeatPerThread; ++r) {
-        std::iota(frames.begin(), frames.end(), idx + 1);
-        for (int i = 0; i < Param.UniqueStacksPerThread; ++i) {
-          StackTrace s(frames.data(), frames.size());
-          auto h = StackDepotPut_WithHandle(s);
-          if (Param.UseCount)
-            h.inc_use_count_unsafe();
-          std::next_permutation(frames.begin(), frames.end());
-        };
-      }
-    };
-
-    std::vector<std::thread> threads;
-    for (int i = 0; i < Param.Threads; ++i)
-      threads.emplace_back(thread, Param.UniqueThreads * i);
-    for (auto& t : threads) t.join();
-  }
-};
+      public testing::WithParamInterface<StackDepotBenchmarkParams> {};
 
 // Test which can be used as a simple benchmark. It's disabled to avoid slowing
 // down check-sanitizer.
 // Usage: Sanitizer-<ARCH>-Test --gtest_also_run_disabled_tests \
 // '--gtest_filter=*Benchmark*'
 TEST_P(StackDepotBenchmark, DISABLED_Benchmark) {
-  // Call in subprocess to avoid reuse of the depot.
-  EXPECT_EXIT((Run(), exit(0)), ::testing::ExitedWithCode(0), "");
+  auto Param = GetParam();
+  std::atomic<unsigned int> here = {};
+
+  auto thread = [&](int idx) {
+    here++;
+    while (here < Param.UniqueThreads) std::this_thread::yield();
+
+    std::vector<uptr> frames(64);
+    for (int r = 0; r < Param.RepeatPerThread; ++r) {
+      std::iota(frames.begin(), frames.end(), idx + 1);
+      for (int i = 0; i < Param.UniqueStacksPerThread; ++i) {
+        StackTrace s(frames.data(), frames.size());
+        auto h = StackDepotPut_WithHandle(s);
+        if (Param.UseCount)
+          h.inc_use_count_unsafe();
+        std::next_permutation(frames.begin(), frames.end());
+      };
+    }
+  };
+
+  std::vector<std::thread> threads;
+  for (int i = 0; i < Param.Threads; ++i)
+    threads.emplace_back(thread, Param.UniqueThreads * i);
+  for (auto& t : threads) t.join();
 }
 
 INSTANTIATE_TEST_SUITE_P(StackDepotBenchmarkSuite, StackDepotBenchmark,
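Note the direction of this last hunk: the benchmark previously ran its body through a Run() helper inside EXPECT_EXIT, forking a subprocess so each run saw a fresh depot. With SetUp()/TearDown() now calling StackDepotTestOnlyUnmap(), that isolation is no longer needed, so the Run() helper and the subprocess indirection are deleted and the body moves back into TEST_P.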