forked from OSchip/llvm-project

[NFC][sanitizer] Parametrize PersistentAllocator with type

This commit is contained in:
parent 3a208c6894
commit d1aaef4296
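The change is mechanical: PersistentAllocator becomes a class template over its element type, alloc() takes an element count (default 1) and returns a typed pointer, so call sites in the chained origin depot and the stack depot drop their sizeof() arithmetic and casts. A minimal sketch of the resulting call-site shape, assuming the sanitizer headers are in scope and using a hypothetical Node type (not one from the hunks below):

// Node stands in for any depot node type; PersistentAllocator and uptr come
// from sanitizer_persistent_allocator.h / sanitizer_internal_defs.h.
struct Node { uptr payload; };

static PersistentAllocator<Node> node_allocator;
static PersistentAllocator<uptr> trace_allocator;

static void Example(uptr size) {
  // Before the change a call site looked roughly like:
  //   Node *n = static_cast<Node *>(allocator.alloc(sizeof(Node)));
  // With the templated allocator the cast and the sizeof disappear:
  Node *n = node_allocator.alloc();               // one Node, default count = 1
  uptr *trace = trace_allocator.alloc(size + 1);  // size + 1 uptr slots
  (void)n;
  (void)trace;
}

The hunks below cover three areas in turn: the chained origin depot, the PersistentAllocator header itself, and the stack depot.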
@@ -59,7 +59,7 @@ struct ChainedOriginDepotNode {
 
 } // namespace
 
-static PersistentAllocator allocator;
+static PersistentAllocator<ChainedOriginDepotNode> allocator;
 
 static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
 
@@ -71,8 +71,7 @@ uptr ChainedOriginDepotNode::allocated() { return allocator.allocated(); }
 
 ChainedOriginDepotNode *ChainedOriginDepotNode::allocate(
     const args_type &args) {
-  return static_cast<ChainedOriginDepotNode *>(
-      allocator.alloc(sizeof(ChainedOriginDepotNode)));
+  return allocator.alloc();
 }
 
 /* This is murmur2 hash for the 64->32 bit case.
@@ -20,9 +20,10 @@
 namespace __sanitizer {
 
+template <typename T>
 class PersistentAllocator {
  public:
-  void *alloc(uptr size);
+  T *alloc(uptr count = 1);
   uptr allocated() const {
     SpinMutexLock l(&mtx);
     return atomic_load_relaxed(&mapped_size) +
@@ -30,42 +31,47 @@ class PersistentAllocator {
   }
 
  private:
-  void *tryAlloc(uptr size);
-  void *refillAndAlloc(uptr size);
+  T *tryAlloc(uptr count);
+  T *refillAndAlloc(uptr count);
   mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
   atomic_uintptr_t region_pos;  // Region allocator for Node's.
   atomic_uintptr_t region_end;
   atomic_uintptr_t mapped_size;
 };
 
-inline void *PersistentAllocator::tryAlloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
   // Optimisic lock-free allocation, essentially try to bump the region ptr.
   for (;;) {
     uptr cmp = atomic_load(&region_pos, memory_order_acquire);
     uptr end = atomic_load(&region_end, memory_order_acquire);
+    uptr size = count * sizeof(T);
     if (cmp == 0 || cmp + size > end) return nullptr;
     if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                      memory_order_acquire))
-      return (void *)cmp;
+      return reinterpret_cast<T *>(cmp);
   }
 }
 
-inline void *PersistentAllocator::alloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::alloc(uptr count) {
   // First, try to allocate optimisitically.
-  void *s = tryAlloc(size);
+  T *s = tryAlloc(count);
   if (LIKELY(s))
     return s;
-  return refillAndAlloc(size);
+  return refillAndAlloc(count);
 }
 
-inline void *PersistentAllocator::refillAndAlloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
   // If failed, lock, retry and alloc new superblock.
   SpinMutexLock l(&mtx);
   for (;;) {
-    void *s = tryAlloc(size);
+    T *s = tryAlloc(count);
     if (s)
       return s;
     atomic_store(&region_pos, 0, memory_order_relaxed);
+    uptr size = count * sizeof(T);
     uptr allocsz = 64 * 1024;
     if (allocsz < size)
       allocsz = size;
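For readers unfamiliar with the allocator, tryAlloc above is an optimistic bump-pointer allocation over a shared region: read the current position, advance it by the requested size with a compare-and-swap, and retry the loop if another thread won the race. The sketch below is a simplified, standalone illustration of that pattern using std::atomic; it is not the sanitizer code itself (the real allocator maps fresh superblocks in refillAndAlloc instead of returning nullptr for good).

#include <atomic>
#include <cstdint>

// Simplified bump-pointer region [pos, end). Returns nullptr when there is
// no region yet or not enough room left, mirroring tryAlloc above.
template <typename T>
T *bump_alloc(std::atomic<std::uintptr_t> &pos,
              std::atomic<std::uintptr_t> &end, std::uintptr_t count) {
  for (;;) {
    std::uintptr_t cur = pos.load(std::memory_order_acquire);
    std::uintptr_t lim = end.load(std::memory_order_acquire);
    std::uintptr_t size = count * sizeof(T);
    if (cur == 0 || cur + size > lim)
      return nullptr;  // No region mapped, or the request does not fit.
    // Try to claim [cur, cur + size); on failure another thread bumped
    // pos first, so loop and retry with the fresh value.
    if (pos.compare_exchange_weak(cur, cur + size,
                                  std::memory_order_acquire))
      return reinterpret_cast<T *>(cur);
  }
}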
@@ -19,8 +19,8 @@
 namespace __sanitizer {
 
-static PersistentAllocator allocator;
-static PersistentAllocator traceAllocator;
+static PersistentAllocator<StackDepotNode> allocator;
+static PersistentAllocator<uptr> traceAllocator;
 
 struct StackDepotNode {
   using hash_type = u64;
@@ -43,7 +43,7 @@ struct StackDepotNode {
     return allocator.allocated() + traceAllocator.allocated();
   }
   static StackDepotNode *allocate(const args_type &args) {
-    return (StackDepotNode *)allocator.alloc(sizeof(StackDepotNode));
+    return allocator.alloc();
   }
   static hash_type hash(const args_type &args) {
     MurMur2Hash64Builder H(args.size * sizeof(uptr));
@@ -59,7 +59,7 @@ struct StackDepotNode {
     atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
                  memory_order_relaxed);
     stack_hash = hash;
-    stack_trace = (uptr *)traceAllocator.alloc((args.size + 1) * sizeof(uptr));
+    stack_trace = traceAllocator.alloc(args.size + 1);
     *stack_trace = args.size;
     internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
   }
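The store path above keeps the trace length inline with the frames: it allocates size + 1 words from traceAllocator, writes the frame count into the first slot, and copies the program counters after it. A hedged sketch of reading such a record back; the TraceView struct and ReadTrace helper are illustrative only, not the depot's actual accessors:

#include <cstdint>

using uptr = std::uintptr_t;  // Stand-in for the sanitizer's uptr typedef.

// Illustrative layout of the length-prefixed buffer written in store():
//   stack_trace[0]    -> number of frames
//   stack_trace[1..n] -> the program counters themselves
struct TraceView {
  uptr size;          // Frame count, taken from the first word.
  const uptr *trace;  // Pointer to the first PC.
};

// Hypothetical helper (not part of the depot): decode such a buffer.
static TraceView ReadTrace(const uptr *stack_trace) {
  return TraceView{stack_trace[0], stack_trace + 1};
}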