[NFC][sanitizer] Parametrize PersistentAllocator with type

Author: Vitaly Buka, 2021-10-08 14:05:29 -07:00
Parent: 3a208c6894
Commit: d1aaef4296
3 changed files with 22 additions and 17 deletions
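
The patch turns PersistentAllocator from an untyped byte allocator into a
class template: alloc() now takes an object count (defaulting to 1) instead
of a byte size and returns T *, so callers drop the sizeof arithmetic and
the casts. A call-site sketch of the difference (the Node type here is
illustrative, not part of the patch, and the two snippets use the old and
the new interface respectively):

struct Node { u64 data; };

// Before: alloc(uptr size) returned void *, so every caller had to cast.
Node *a = static_cast<Node *>(allocator.alloc(sizeof(Node)));

// After: PersistentAllocator<Node>::alloc(uptr count = 1) returns Node *.
Node *b = allocator.alloc();            // one Node, no cast
uptr *t = traceAllocator.alloc(n + 1);  // an array of n + 1 uptrs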


@@ -59,7 +59,7 @@ struct ChainedOriginDepotNode {
 }  // namespace

-static PersistentAllocator allocator;
+static PersistentAllocator<ChainedOriginDepotNode> allocator;

 static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;

@@ -71,8 +71,7 @@ uptr ChainedOriginDepotNode::allocated() { return allocator.allocated(); }
 ChainedOriginDepotNode *ChainedOriginDepotNode::allocate(
     const args_type &args) {
-  return static_cast<ChainedOriginDepotNode *>(
-      allocator.alloc(sizeof(ChainedOriginDepotNode)));
+  return allocator.alloc();
 }

 /* This is murmur2 hash for the 64->32 bit case.


@@ -20,9 +20,10 @@
 namespace __sanitizer {

+template <typename T>
 class PersistentAllocator {
  public:
-  void *alloc(uptr size);
+  T *alloc(uptr count = 1);
   uptr allocated() const {
     SpinMutexLock l(&mtx);
     return atomic_load_relaxed(&mapped_size) +
@@ -30,42 +31,47 @@ class PersistentAllocator {
   }

  private:
-  void *tryAlloc(uptr size);
-  void *refillAndAlloc(uptr size);
+  T *tryAlloc(uptr count);
+  T *refillAndAlloc(uptr count);
   mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
   atomic_uintptr_t region_pos;  // Region allocator for Nodes.
   atomic_uintptr_t region_end;
   atomic_uintptr_t mapped_size;
 };

-inline void *PersistentAllocator::tryAlloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
   // Optimistic lock-free allocation: essentially, try to bump the region ptr.
   for (;;) {
     uptr cmp = atomic_load(&region_pos, memory_order_acquire);
     uptr end = atomic_load(&region_end, memory_order_acquire);
+    uptr size = count * sizeof(T);
     if (cmp == 0 || cmp + size > end) return nullptr;
     if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                      memory_order_acquire))
-      return (void *)cmp;
+      return reinterpret_cast<T *>(cmp);
   }
 }

-inline void *PersistentAllocator::alloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::alloc(uptr count) {
   // First, try to allocate optimistically.
-  void *s = tryAlloc(size);
+  T *s = tryAlloc(count);
   if (LIKELY(s))
     return s;
-  return refillAndAlloc(size);
+  return refillAndAlloc(count);
 }

-inline void *PersistentAllocator::refillAndAlloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
   // If that failed, lock, retry, and allocate a new superblock.
   SpinMutexLock l(&mtx);
   for (;;) {
-    void *s = tryAlloc(size);
+    T *s = tryAlloc(count);
     if (s)
       return s;
     atomic_store(&region_pos, 0, memory_order_relaxed);
+    uptr size = count * sizeof(T);
     uptr allocsz = 64 * 1024;
     if (allocsz < size)
       allocsz = size;
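
Outside the sanitizer runtime, the same scheme can be reproduced with
standard C++. Below is a minimal sketch of the technique this header
implements: an optimistic CAS bump of the region pointer, plus a
mutex-guarded refill that parks the fast path before installing a new
superblock. BumpAllocator, kSuperblock, and the use of std::malloc in
place of the runtime's internal page mapping are illustrative assumptions,
not part of the patch.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <mutex>

template <typename T>
class BumpAllocator {
 public:
  T *alloc(std::size_t count = 1) {
    if (T *p = try_alloc(count)) return p;  // lock-free fast path
    return refill_and_alloc(count);         // slow path takes the lock
  }

 private:
  T *try_alloc(std::size_t count) {
    std::size_t size = count * sizeof(T);
    for (;;) {
      std::uintptr_t cur = pos_.load(std::memory_order_acquire);
      std::uintptr_t end = end_.load(std::memory_order_acquire);
      // cur == 0 means "no region yet" or "a refill is in progress".
      if (cur == 0 || cur + size > end) return nullptr;
      // Try to bump the region pointer; if the CAS fails, another thread
      // won the race, so reload and retry.
      if (pos_.compare_exchange_weak(cur, cur + size,
                                     std::memory_order_acquire))
        return reinterpret_cast<T *>(cur);
    }
  }

  T *refill_and_alloc(std::size_t count) {
    std::lock_guard<std::mutex> lock(mtx_);
    for (;;) {
      if (T *p = try_alloc(count)) return p;  // another thread refilled first
      // Park the fast path: cur == 0 makes try_alloc bail out while the
      // new block is installed. The old block is never freed; the
      // allocator is persistent by design.
      pos_.store(0, std::memory_order_relaxed);
      std::size_t size = count * sizeof(T);
      std::size_t block = size > kSuperblock ? size : kSuperblock;
      void *mem = std::malloc(block);  // stand-in for the runtime's mmap
      if (!mem) return nullptr;
      end_.store(reinterpret_cast<std::uintptr_t>(mem) + block,
                 std::memory_order_release);
      pos_.store(reinterpret_cast<std::uintptr_t>(mem),
                 std::memory_order_release);
    }
  }

  static constexpr std::size_t kSuperblock = 64 * 1024;
  std::mutex mtx_;
  std::atomic<std::uintptr_t> pos_{0};
  std::atomic<std::uintptr_t> end_{0};
};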


@@ -19,8 +19,8 @@
 namespace __sanitizer {

-static PersistentAllocator allocator;
-static PersistentAllocator traceAllocator;
+static PersistentAllocator<StackDepotNode> allocator;
+static PersistentAllocator<uptr> traceAllocator;

 struct StackDepotNode {
   using hash_type = u64;
@@ -43,7 +43,7 @@ struct StackDepotNode {
     return allocator.allocated() + traceAllocator.allocated();
   }
   static StackDepotNode *allocate(const args_type &args) {
-    return (StackDepotNode *)allocator.alloc(sizeof(StackDepotNode));
+    return allocator.alloc();
   }
   static hash_type hash(const args_type &args) {
     MurMur2Hash64Builder H(args.size * sizeof(uptr));
@@ -59,7 +59,7 @@ struct StackDepotNode {
     atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
                  memory_order_relaxed);
     stack_hash = hash;
-    stack_trace = (uptr *)traceAllocator.alloc((args.size + 1) * sizeof(uptr));
+    stack_trace = traceAllocator.alloc(args.size + 1);
     *stack_trace = args.size;
     internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
   }
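
The in-memory layout of a stored trace is unchanged: traceAllocator now
simply returns a typed uptr array whose first element holds the frame
count. A hypothetical read-side helper, not in the patch, shows the layout:

// Element 0 is the frame count written by store(); frames follow it.
static const uptr *trace_frames(const uptr *stack_trace, uptr *count) {
  *count = stack_trace[0];
  return stack_trace + 1;
}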