[sanitizer] Move out stack trace pointer from header StackDepot

Trace pointers are accessed very rarely and don't need to be in the hot data.

Depends on D111613.

Reviewed By: dvyukov

Differential Revision: https://reviews.llvm.org/D111614
parent a8e7d11aca
commit 8282024a74
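The change is a textbook hot/cold data split: the fields probed on every hash lookup stay in the compact node, while the rarely used trace pointer moves to a side table indexed by the node id. A minimal sketch of the layout idea, using illustrative stand-in names (HotNode, cold_trace_ptrs, load_trace are not the sanitizer's actual types):

#include <cstdint>
#include <vector>

// Hot part: scanned on every hash-table probe, so it should stay compact.
struct HotNode {
  uint64_t stack_hash;  // compared against the lookup hash on each probe
  uint32_t link;        // next node in the bucket chain
};                      // note: no trace pointer in the hot node anymore

// Cold side table, indexed by the same node id; touched only on
// store()/load(), so probe chains never pull these lines into cache.
static std::vector<uintptr_t *> cold_trace_ptrs;

static uintptr_t *load_trace(uint32_t id) {
  return id < cold_trace_ptrs.size() ? cold_trace_ptrs[id] : nullptr;
}

On a 64-bit target the node shrinks from 24 to 16 bytes (u64 hash + u32 link + atomic u32, with the 8-byte pointer gone), so bucket-chain walks touch fewer cache lines.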
compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp

@@ -38,9 +38,9 @@ struct ChainedOriginDepotNode {

   static bool is_valid(const args_type &args);

-  void store(const args_type &args, hash_type other_hash);
+  void store(u32 id, const args_type &args, hash_type other_hash);

-  args_type load() const;
+  args_type load(u32 id) const;

   struct Handle {
     const ChainedOriginDepotNode *node_ = nullptr;
@@ -106,13 +106,13 @@ ChainedOriginDepotNode::hash_type ChainedOriginDepotNode::hash(

 bool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }

-void ChainedOriginDepotNode::store(const args_type &args,
+void ChainedOriginDepotNode::store(u32 id, const args_type &args,
                                    hash_type other_hash) {
   here_id = args.here_id;
   prev_id = args.prev_id;
 }

-ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load() const {
+ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {
   args_type ret = {here_id, prev_id};
   return ret;
 }
compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

@@ -24,7 +24,6 @@ static PersistentAllocator<uptr> traceAllocator;

 struct StackDepotNode {
   using hash_type = u64;
   hash_type stack_hash;
-  uptr *stack_trace;
   u32 link;
   atomic_uint32_t tag_and_use_count;  // tag : 12 high bits; use_count : 20;
@@ -47,22 +46,8 @@ struct StackDepotNode {
   static bool is_valid(const args_type &args) {
     return args.size > 0 && args.trace;
   }
-  void store(const args_type &args, hash_type hash) {
-    CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
-    atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
-                 memory_order_relaxed);
-    stack_hash = hash;
-    stack_trace = traceAllocator.alloc(args.size + 1);
-    *stack_trace = args.size;
-    internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
-  }
-  args_type load() const {
-    if (!stack_trace)
-      return {};
-    u32 tag =
-        atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
-    return args_type(stack_trace + 1, *stack_trace, tag);
-  }
+  void store(u32 id, const args_type &args, hash_type hash);
+  args_type load(u32 id) const;
   static StackDepotHandle get_handle(u32 id);

   typedef StackDepotHandle handle_type;
@@ -85,6 +70,30 @@ void StackDepotHandle::inc_use_count_unsafe() {

 typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
     StackDepot;
 static StackDepot theDepot;
+// Keep rarely accessed stack traces out of frequently access nodes to improve
+// caching efficiency.
+static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
+    tracePtrs;
+
+void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
+  CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
+  atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
+               memory_order_relaxed);
+  stack_hash = hash;
+  uptr *stack_trace = traceAllocator.alloc(args.size + 1);
+  *stack_trace = args.size;
+  internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
+  tracePtrs[id] = stack_trace;
+}
+
+StackDepotNode::args_type StackDepotNode::load(u32 id) const {
+  const uptr *stack_trace = tracePtrs[id];
+  if (!stack_trace)
+    return {};
+  u32 tag =
+      atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
+  return args_type(stack_trace + 1, *stack_trace, tag);
+}
+
 StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
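tracePtrs mirrors the depot's node index space, so the same u32 id that names a node also locates its trace. A rough sketch of how a two-level map supports that cheaply, with made-up names and sizes (kL1Size, kL2Size, slot() are hypothetical; the real TwoLevelMap lives in sanitizer_common):

#include <cstdint>
#include <cstdlib>

// Hypothetical two-level layout: a fixed first-level array of lazily
// allocated second-level blocks, so a mostly-empty depot costs almost nothing.
constexpr uint32_t kL1Size = 1 << 10;  // illustrative, not the real
constexpr uint32_t kL2Size = 1 << 10;  // kNodesSize1/kNodesSize2 values

static uintptr_t **level1[kL1Size];  // level1[hi] -> block of kL2Size slots

static uintptr_t *&slot(uint32_t id) {
  uint32_t hi = id / kL2Size, lo = id % kL2Size;
  if (!level1[hi])  // materialize a zeroed block on first touch
    level1[hi] =
        static_cast<uintptr_t **>(calloc(kL2Size, sizeof(uintptr_t *)));
  return level1[hi][lo];
}

A null slot doubles as the "no trace stored yet" signal, which is exactly how the new load(u32 id) above bails out with an empty args_type.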
compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h

@@ -135,7 +135,7 @@ u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
   CHECK_EQ(s & kUnlockMask, s);
   CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
   Node &new_node = nodes[s];
-  new_node.store(args, h);
+  new_node.store(s, args, h);
   new_node.link = s2;
   unlock(p, s);
   if (inserted) *inserted = true;
@@ -151,7 +151,7 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
   if (!nodes.contains(id))
     return args_type();
   const Node &node = nodes[id];
-  return node.load();
+  return node.load(id);
 }

 template <class Node, int kReservedBits, int kTabSizeLog>
@@ -178,7 +178,7 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
     for (; s;) {
       const Node &node = nodes[s];
      Printf("Stack for id %u:\n", s);
-      node.load().Print();
+      node.load(s).Print();
       s = node.link;
     }
   }
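The three stackdepotbase.h hunks above show why store() and load() grew an id parameter: a node can no longer reach its own trace, so the depot base threads the node index (s or id) through every call site. A toy round-trip under assumed names (toy_put/toy_get are not the real depot API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy depot: the node keeps only hot metadata; the trace sits in a parallel
// table, so both storing and loading must be handed the node id.
struct ToyNode { uint64_t hash; };

static std::vector<ToyNode> nodes;
static std::vector<std::vector<uintptr_t>> traces;  // indexed by node id

static uint32_t toy_put(uint64_t hash, const uintptr_t *pcs, size_t n) {
  uint32_t id = static_cast<uint32_t>(nodes.size());
  nodes.push_back({hash});
  traces.emplace_back(pcs, pcs + n);  // like store(id, ...): same id keys both
  return id;
}

static const std::vector<uintptr_t> &toy_get(uint32_t id) {
  return traces[id];  // like load(id): the id locates the cold data
}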