[sanitizer_common] Recycle StackDepot memory

This relates to https://reviews.llvm.org/D95835.

DFSan origin tracking uses StackDepot to record stack traces and
origin traces, just as MSan origin tracking does.

For at least two reasons, we want to control StackDepot's memory cost:
1) We may use DFSan origin tracking to monitor programs that run for
   many days. Because StackDepot's backing memory is never released,
   it may eventually grow too large.
2) DFSan supports flushing shadow memory to reduce overhead. After a
   flush, all existing StackDepot IDs become invalid, since nothing
   refers to them anymore; their memory should be recycled, as
   sketched below.
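
As an illustration, here is a minimal sketch of the intended call
pattern. It is hypothetical glue code: the flush routine, its name, and
the __dfsan placement are illustrative; only StackDepotFree() comes
from this patch.

#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __dfsan {

// Hypothetical flush path: once the shadow and origin ranges are
// wiped, no origin id stored anywhere can reach the depot, so its
// nodes can be recycled instead of being retained forever.
void FlushShadowAndOrigins() {
  // ... unmap or zero the shadow and origin ranges (DFSan-specific) ...
  __sanitizer::StackDepotFree();
}

}  // namespace __dfsan
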
Author: Jianzhou Zhao
Date:   2021-02-06 10:01:17 +00:00
Parent: d5069dace7
Commit: 78804e6b20

4 changed files with 96 additions and 3 deletions

compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

@@ -115,6 +115,10 @@ void StackDepotUnlockAll() {
   theDepot.UnlockAll();
 }
 
+void StackDepotFree() {
+  theDepot.Free();
+}
+
 void StackDepotPrintAll() {
 #if !SANITIZER_GO
   theDepot.PrintAll();

compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h

@@ -39,6 +39,7 @@ StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
 // Retrieves a stored stack trace by the id.
 StackTrace StackDepotGet(u32 id);
 
+void StackDepotFree();
 void StackDepotLockAll();
 void StackDepotUnlockAll();
 void StackDepotPrintAll();
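
To make the new entry point concrete, here is a lifecycle sketch
against this header. It is hedged test-style code: the function name
and the fake PCs are illustrative, while the API calls and the same-id
behavior match the unit test at the bottom of this commit.

#include "sanitizer_common/sanitizer_stackdepot.h"

using namespace __sanitizer;

void DepotLifecycle() {
  uptr pcs[] = {0x401000, 0x401234};           // fake PCs for illustration
  u32 id = StackDepotPut(StackTrace(pcs, 2));  // record a trace, get an id
  StackTrace t = StackDepotGet(id);            // resolve the id back
  (void)t;
  StackDepotFree();  // every id is now stale; nodes move to free lists
  // StackDepotGet(id).trace is null until the trace is Put again, and
  // re-putting the same trace hands back the same id (see the test below).
}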

compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h

@@ -37,12 +37,15 @@ class StackDepotBase {
   void LockAll();
   void UnlockAll();
   void PrintAll();
 
+  void Free();
+
 private:
   static Node *find(Node *s, args_type args, u32 hash);
   static Node *lock(atomic_uintptr_t *p);
   static void unlock(atomic_uintptr_t *p, Node *s);
+  Node *alloc(uptr part, uptr memsz);
 
   static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
   static const int kPartBits = 8;
   static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
@@ -53,6 +56,7 @@ class StackDepotBase {
   atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
   atomic_uint32_t seq[kPartCount];  // Unique id generators.
+  atomic_uintptr_t freeNodes[kPartCount];
 
   StackDepotStats stats;
@@ -95,6 +99,57 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
   atomic_store(p, (uptr)s, memory_order_release);
 }
 
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::Free() {
+  // Take every bucket lock and every per-part free-list lock so that
+  // no concurrent Put or Get can observe a half-reset depot.
+  LockAll();
+  for (int i = 0; i < kPartCount; ++i) {
+    lock(&freeNodes[i]);
+  }
+  for (int i = 0; i < kTabSize; ++i) {
+    atomic_uintptr_t *p_tab = &tab[i];
+    Node *s = (Node *)(atomic_load(p_tab, memory_order_relaxed) & ~1UL);
+    // Push every node of this bucket's chain onto its part's free list.
+    while (s) {
+      uptr part = s->id >> kPartShift;
+      atomic_uintptr_t *p_free_nodes = &freeNodes[part];
+      Node *free_nodes_head =
+          (Node *)(atomic_load(p_free_nodes, memory_order_relaxed) & ~1UL);
+      Node *next = s->link;
+      s->link = free_nodes_head;
+      atomic_store(p_free_nodes, (uptr)s, memory_order_release);
+      s = next;
+    }
+    atomic_store(p_tab, (uptr)nullptr, memory_order_release);
+  }
+  stats.n_uniq_ids = 0;
+  // Restart the per-part id generators; ids are reissued from the
+  // beginning after a free.
+  for (int i = 0; i < kPartCount; ++i)
+    (void)atomic_exchange(&seq[i], 0, memory_order_relaxed);
+  // Release the free-list locks in reverse order of acquisition.
+  for (int i = kPartCount - 1; i >= 0; --i) {
+    atomic_uintptr_t *p = &freeNodes[i];
+    uptr s = atomic_load(p, memory_order_relaxed);
+    unlock(p, (Node *)(s & ~1UL));
+  }
+  UnlockAll();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::alloc(uptr part,
+                                                              uptr memsz) {
+  // Reuse a recycled node from this part's free list when possible ...
+  atomic_uintptr_t *p = &freeNodes[part];
+  Node *head = lock(p);
+  if (head) {
+    unlock(p, head->link);
+    return head;
+  }
+  unlock(p, head);
+  // ... and fall back to PersistentAlloc, which can never release the
+  // memory again, only when the free list is empty.
+  Node *s = (Node *)PersistentAlloc(memsz);
+  stats.allocated += memsz;
+  return s;
+}
+
 template <class Node, int kReservedBits, int kTabSizeLog>
 typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
@@ -125,8 +180,7 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
   CHECK_NE(id, 0);
   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
   uptr memsz = Node::storage_size(args);
-  s = (Node *)PersistentAlloc(memsz);
-  stats.allocated += memsz;
+  s = alloc(part, memsz);
   s->id = id;
   s->store(args, h);
   s->link = s2;
@@ -168,7 +222,7 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
-  for (int i = 0; i < kTabSize; ++i) {
+  for (int i = kTabSize - 1; i >= 0; --i) {
     atomic_uintptr_t *p = &tab[i];
     uptr s = atomic_load(p, memory_order_relaxed);
     unlock(p, (Node *)(s & ~1UL));
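
Taken together, these hunks implement a per-part free-list recycler.
The following self-contained sketch shows the same scheme in isolation;
it is an illustration under stated assumptions, with std::atomic and
std::malloc standing in for the sanitizer's hand-rolled atomics and
PersistentAlloc, and Node, Lock, Unlock, Recycle, and Alloc as made-up
names. The low bit of each part's list head doubles as a spin lock,
Free() pushes every hash-table node onto its part's list, and alloc()
pops from that list before asking the persistent allocator for fresh
memory.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

struct Node {
  Node *link;
};

constexpr int kPartCount = 256;  // illustrative; the depot uses 1 << kPartBits
std::atomic<uintptr_t> free_nodes[kPartCount];  // low bit doubles as a lock

// Spin until the low bit is clear, then set it. Returns the list head.
Node *Lock(std::atomic<uintptr_t> *p) {
  for (;;) {
    uintptr_t cur = p->load(std::memory_order_relaxed);
    if (cur & 1)
      continue;  // another thread holds this part's lock
    if (p->compare_exchange_weak(cur, cur | 1, std::memory_order_acquire))
      return reinterpret_cast<Node *>(cur);
  }
}

// Publish a new head with the low bit clear, releasing the lock.
void Unlock(std::atomic<uintptr_t> *p, Node *head) {
  p->store(reinterpret_cast<uintptr_t>(head), std::memory_order_release);
}

// Analogue of the push loop in Free(): prepend one reclaimed node.
void Recycle(uint32_t part, Node *s) {
  std::atomic<uintptr_t> *p = &free_nodes[part];
  s->link = Lock(p);  // the old head becomes the successor
  Unlock(p, s);       // the new head; this store also drops the lock
}

// Analogue of alloc(): pop a recycled node, else allocate fresh memory.
// std::malloc stands in for PersistentAlloc, which never frees.
Node *Alloc(uint32_t part, std::size_t memsz) {
  std::atomic<uintptr_t> *p = &free_nodes[part];
  Node *head = Lock(p);
  if (head) {
    Unlock(p, head->link);  // pop: the successor becomes the new head
    return head;
  }
  Unlock(p, nullptr);  // the list was empty; just drop the lock
  return static_cast<Node *>(std::malloc(memsz));
}

Because the recycler reuses the tagged-pointer lock the depot already
uses for its hash buckets, the patch needs no new synchronization
primitive; the only added state is one pointer-sized word per part.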

compiler-rt/lib/sanitizer_common/tests/sanitizer_stackdepot_test.cpp

@@ -111,4 +111,38 @@ TEST(SanitizerCommon, StackDepotReverseMap) {
   }
 }
 
+TEST(SanitizerCommon, StackDepotFree) {
+  uptr array[] = {1, 2, 3, 4, 5};
+  StackTrace s1(array, ARRAY_SIZE(array));
+  u32 i1 = StackDepotPut(s1);
+  StackTrace stack = StackDepotGet(i1);
+  EXPECT_NE(stack.trace, (uptr*)0);
+  EXPECT_EQ(ARRAY_SIZE(array), stack.size);
+  EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));
+
+  StackDepotStats *stats_before_free = StackDepotGetStats();
+  EXPECT_EQ(1U, stats_before_free->n_uniq_ids);
+  EXPECT_NE(0U, stats_before_free->allocated);
+
+  // Free drops all ids but keeps the nodes on free lists, so the
+  // allocated byte count must not change.
+  StackDepotFree();
+  StackDepotStats *stats_after_free = StackDepotGetStats();
+  EXPECT_EQ(0U, stats_after_free->n_uniq_ids);
+  EXPECT_EQ(stats_before_free->allocated, stats_after_free->allocated);
+  stack = StackDepotGet(i1);
+  EXPECT_EQ((uptr*)0, stack.trace);
+
+  // Re-putting the same trace recycles a node: same id, no new memory.
+  EXPECT_EQ(i1, StackDepotPut(s1));
+  StackDepotStats *stats_after_2nd_put = StackDepotGetStats();
+  EXPECT_EQ(1U, stats_after_2nd_put->n_uniq_ids);
+  EXPECT_EQ(stats_after_2nd_put->allocated, stats_after_free->allocated);
+
+  stack = StackDepotGet(i1);
+  EXPECT_NE(stack.trace, (uptr*)0);
+  EXPECT_EQ(ARRAY_SIZE(array), stack.size);
+  EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));
+}
+
 }  // namespace __sanitizer