[NFC][sanitizer] Remove global PersistentAllocator
This makes it easier to track memory usage and to do other incremental refactorings.

Differential Revision: https://reviews.llvm.org/D111256
parent 78c5754813
commit 8f3e52538d
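The pattern behind the change, as a minimal standalone sketch (this is not the sanitizer code; BumpAllocator, origin_depot and trace_depot are invented names): instead of every depot sharing one process-wide allocator, each depot translation unit owns a file-local allocator, so its memory usage can be queried separately.

#include <cstddef>
#include <cstdio>
#include <vector>

// Trivial stand-in for PersistentAllocator: keeps everything it hands out
// alive and remembers how many bytes that was.
class BumpAllocator {
 public:
  void *alloc(std::size_t size) {
    blocks_.emplace_back(size);
    allocated_ += size;
    return blocks_.back().data();
  }
  std::size_t allocated() const { return allocated_; }

 private:
  std::vector<std::vector<char>> blocks_;
  std::size_t allocated_ = 0;
};

// Each "depot" owns a file-local allocator instead of sharing a global one.
namespace origin_depot {
static BumpAllocator allocator;
void *AllocateNode(std::size_t size) { return allocator.alloc(size); }
std::size_t Allocated() { return allocator.allocated(); }
}  // namespace origin_depot

namespace trace_depot {
static BumpAllocator allocator;
void *AllocateNode(std::size_t size) { return allocator.alloc(size); }
std::size_t Allocated() { return allocator.allocated(); }
}  // namespace trace_depot

int main() {
  origin_depot::AllocateNode(64);
  trace_depot::AllocateNode(128);
  trace_depot::AllocateNode(32);
  std::printf("origin depot: %zu bytes\n", origin_depot::Allocated());  // 64
  std::printf("trace depot:  %zu bytes\n", trace_depot::Allocated());   // 160
}

With a single global allocator only the combined 224-byte total would be visible; with one allocator per depot, a GetStats-style query can attribute bytes to the component that allocated them.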
@@ -13,14 +13,21 @@
namespace __sanitizer {

static PersistentAllocator allocator;

bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
    hash_type hash, const args_type &args) const {
  return here_id == args.here_id && prev_id == args.prev_id;
}

uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
    const args_type &args) {
  return sizeof(ChainedOriginDepotNode);
}

uptr ChainedOriginDepot::ChainedOriginDepotNode::allocated() {
  return allocator.allocated();
}

ChainedOriginDepot::ChainedOriginDepotNode *
ChainedOriginDepot::ChainedOriginDepotNode::allocate(const args_type &args) {
  return static_cast<ChainedOriginDepot::ChainedOriginDepotNode *>(
      allocator.alloc(sizeof(ChainedOriginDepotNode)));
}

/* This is murmur2 hash for the 64->32 bit case.
@@ -53,7 +53,9 @@ class ChainedOriginDepot {
    bool eq(hash_type hash, const args_type &args) const;

    static uptr storage_size(const args_type &args);

    static uptr allocated();

    static ChainedOriginDepotNode *allocate(const args_type &args);

    static hash_type hash(const args_type &args);
@@ -13,8 +13,6 @@
namespace __sanitizer {

PersistentAllocator thePersistentAllocator;

void *PersistentAllocator::refillAndAlloc(uptr size) {
  // If failed, lock, retry and alloc new superblock.
  SpinMutexLock l(&mtx);
@@ -27,6 +25,7 @@ void *PersistentAllocator::refillAndAlloc(uptr size) {
    if (allocsz < size)
      allocsz = size;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
@@ -23,13 +23,19 @@ namespace __sanitizer {
class PersistentAllocator {
 public:
  void *alloc(uptr size);
  uptr allocated() const {
    SpinMutexLock l(&mtx);
    return atomic_load_relaxed(&mapped_size) +
           atomic_load_relaxed(&region_pos) - atomic_load_relaxed(&region_end);
  }

 private:
  void *tryAlloc(uptr size);
  void *refillAndAlloc(uptr size);
  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
  mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
  atomic_uintptr_t region_pos;  // Region allocator for Node's.
  atomic_uintptr_t region_end;
  atomic_uintptr_t mapped_size;
};

inline void *PersistentAllocator::tryAlloc(uptr size) {
@@ -51,11 +57,6 @@ inline void *PersistentAllocator::alloc(uptr size) {
  return refillAndAlloc(size);
}

extern PersistentAllocator thePersistentAllocator;

inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}

} // namespace __sanitizer

#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
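A single-threaded model of the accounting the new allocated() method performs (RegionModel and its plain fields are simplified stand-ins I invented; the real class uses atomics and a spin lock): mapped_size is the total number of bytes ever mmapped, region_end - region_pos is the unused tail of the current superblock, so the bytes handed out so far are mapped_size + region_pos - region_end.

#include <cassert>
#include <cstdint>

struct RegionModel {
  uintptr_t region_pos = 0, region_end = 0, mapped_size = 0;

  // Map a new superblock and make it the current region.
  void refill(uintptr_t base, uintptr_t block_size) {
    mapped_size += block_size;
    region_pos = base;
    region_end = base + block_size;
  }
  // Bump-allocate from the current region.
  void *alloc(uintptr_t size) {
    assert(region_pos + size <= region_end);
    void *p = reinterpret_cast<void *>(region_pos);
    region_pos += size;
    return p;
  }
  uintptr_t allocated() const {
    return mapped_size + region_pos - region_end;  // same expression as above
  }
};

int main() {
  RegionModel m;
  m.refill(0x1000, 4096);  // one 4 KiB superblock mapped
  m.alloc(100);
  m.alloc(28);
  // 4096 bytes mapped, 3968 still unused in the block -> 128 bytes allocated.
  assert(m.allocated() == 128);
}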
@@ -14,10 +14,13 @@

#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_persistent_allocator.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

static PersistentAllocator allocator;

struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
@@ -36,8 +39,10 @@ struct StackDepotNode {
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  static uptr allocated() { return allocator.allocated(); }
  static StackDepotNode *allocate(const args_type &args) {
    uptr alloc_size = sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
    return (StackDepotNode *)allocator.alloc(alloc_size);
  }
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
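The storage_size/allocate pair above sizes each node for a variable-length trace: the struct ends in a one-element array and (args.size - 1) extra slots are allocated directly behind it. A small sketch of that idiom, with invented names (TraceNode, AllocateTrace) and plain malloc standing in for the depot allocator:

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct TraceNode {
  unsigned size;             // number of frames in the trace
  unsigned long frames[1];   // first frame; the rest follow in the same block
};

// Classic "struct hack": over-allocate (n - 1) extra slots past the struct.
TraceNode *AllocateTrace(const unsigned long *pcs, unsigned n) {
  size_t bytes = sizeof(TraceNode) + (n - 1) * sizeof(unsigned long);
  TraceNode *node = static_cast<TraceNode *>(std::malloc(bytes));
  node->size = n;
  std::memcpy(node->frames, pcs, n * sizeof(unsigned long));
  return node;
}

int main() {
  unsigned long pcs[5] = {0x10, 0x20, 0x30, 0x40, 0x50};
  TraceNode *t = AllocateTrace(pcs, 5);
  // With 8-byte frames, 4 extra slots (32 bytes) are added past the struct,
  // matching the sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr) idea.
  std::printf("frames=%u last=%#lx\n", t->size, t->frames[4]);
  std::free(t);
}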
@@ -33,7 +33,7 @@ class StackDepotBase {
  // Retrieves a stored stack trace by the id.
  args_type Get(u32 id);

  StackDepotStats GetStats() const { return stats; }
  StackDepotStats GetStats() const { return {n_uniq_ids, Node::allocated()}; }

  void LockAll();
  void UnlockAll();
@@ -55,7 +55,7 @@ class StackDepotBase {
  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
  atomic_uint32_t seq[kPartCount];  // Unique id generators.

  StackDepotStats stats;
  uptr n_uniq_ids;

  friend class StackDepotReverseMap;
};
@@ -120,14 +120,12 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
  }
  uptr part = (h % kTabSize) / kPartSize;
  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
  stats.n_uniq_ids++;
  n_uniq_ids++;
  CHECK_LT(id, kMaxId);
  id |= part << kPartShift;
  CHECK_NE(id, 0);
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  uptr memsz = Node::storage_size(args);
  s = (Node *)PersistentAlloc(memsz);
  stats.allocated += memsz;
  s = Node::allocate(args);
  s->id = id;
  s->store(args, h);
  s->link = s2;
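How the stats pieces fit together after this hunk, in a simplified sketch (DepotBase, FakeNode and the 48-byte node size are illustrative, not the sanitizer types): Put only bumps the unique-id counter, while the byte total is pulled from the node's own allocator when GetStats is called, so the base class no longer needs a stats.allocated field.

#include <cstdint>
#include <cstdio>

struct DepotStats {
  uint64_t n_uniq_ids;
  uint64_t allocated;
};

// Stand-in for StackDepotNode: owns its allocator's byte count.
struct FakeNode {
  static inline uint64_t bytes = 0;  // per-depot byte counter
  static FakeNode *allocate() {
    bytes += 48;            // allocation bookkeeping lives with the node
    return new FakeNode;    // never freed, like real depot nodes
  }
  static uint64_t allocated() { return bytes; }
};

template <class Node>
class DepotBase {
 public:
  void Put() {
    Node::allocate();
    ++n_uniq_ids_;          // the base class only counts unique entries
  }
  DepotStats GetStats() const { return {n_uniq_ids_, Node::allocated()}; }

 private:
  uint64_t n_uniq_ids_ = 0;
};

int main() {
  DepotBase<FakeNode> depot;
  depot.Put();
  depot.Put();
  DepotStats s = depot.GetStats();
  std::printf("ids=%llu bytes=%llu\n", (unsigned long long)s.n_uniq_ids,
              (unsigned long long)s.allocated);  // ids=2 bytes=96
}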