forked from OSchip/llvm-project
[sanitizer] Uninline slow path of PersistentAllocator::alloc
This commit is contained in:
parent f4a5fb0c55
commit 78c5754813
@@ -15,4 +15,21 @@ namespace __sanitizer {
 PersistentAllocator thePersistentAllocator;
 
+void *PersistentAllocator::refillAndAlloc(uptr size) {
+  // If failed, lock, retry and alloc new superblock.
+  SpinMutexLock l(&mtx);
+  for (;;) {
+    void *s = tryAlloc(size);
+    if (s)
+      return s;
+    atomic_store(&region_pos, 0, memory_order_relaxed);
+    uptr allocsz = 64 * 1024;
+    if (allocsz < size)
+      allocsz = size;
+    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    atomic_store(&region_end, mem + allocsz, memory_order_release);
+    atomic_store(&region_pos, mem, memory_order_release);
+  }
+}
+
 } // namespace __sanitizer
 
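The fast path that this slow path backs up, tryAlloc, is not touched by the commit and therefore does not appear in the diff. For context, a minimal sketch of a lock-free bump allocation consistent with the region_pos/region_end atomics used above could look like the following; treat it as an illustration, not a verbatim copy of the upstream header:

```cpp
// Sketch only: the optimistic fast path that refillAndAlloc falls back from.
// A single compare-exchange bumps region_pos; it returns null when the
// current superblock is exhausted or a refill is in progress (region_pos == 0).
inline void *PersistentAllocator::tryAlloc(uptr size) {
  uptr cmp = atomic_load(&region_pos, memory_order_acquire);
  for (;;) {
    if (cmp == 0) return nullptr;  // refill in progress, take the slow path
    uptr end = atomic_load(&region_end, memory_order_acquire);
    if (cmp + size > end) return nullptr;  // current superblock exhausted
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return (void *)cmp;
  }
}
```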
@@ -26,6 +26,7 @@ class PersistentAllocator {
 
  private:
   void *tryAlloc(uptr size);
+  void *refillAndAlloc(uptr size);
   StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
   atomic_uintptr_t region_pos;  // Region allocator for Node's.
   atomic_uintptr_t region_end;
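Put together, the class now declares both paths. The sketch below reconstructs the surrounding class layout; the public section is inferred from the alloc definition in the next hunk and is an assumption, not part of this diff:

```cpp
// Reconstructed sketch of the resulting class layout, not a verbatim copy.
class PersistentAllocator {
 public:
  void *alloc(uptr size);  // inline fast path, defined below in the header

 private:
  void *tryAlloc(uptr size);        // lock-free bump allocation
  void *refillAndAlloc(uptr size);  // new: out-of-line slow path
  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
  atomic_uintptr_t region_pos;  // Region allocator for Node's.
  atomic_uintptr_t region_end;
};
```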
@@ -47,18 +48,7 @@ inline void *PersistentAllocator::alloc(uptr size) {
   // First, try to allocate optimisitically.
   void *s = tryAlloc(size);
   if (s) return s;
-  // If failed, lock, retry and alloc new superblock.
-  SpinMutexLock l(&mtx);
-  for (;;) {
-    s = tryAlloc(size);
-    if (s) return s;
-    atomic_store(&region_pos, 0, memory_order_relaxed);
-    uptr allocsz = 64 * 1024;
-    if (allocsz < size) allocsz = size;
-    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
-    atomic_store(&region_end, mem + allocsz, memory_order_release);
-    atomic_store(&region_pos, mem, memory_order_release);
-  }
+  return refillAndAlloc(size);
 }
 
 extern PersistentAllocator thePersistentAllocator;
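For readers outside the sanitizer tree, the same fast-path/slow-path split can be shown as a self-contained sketch in standard C++. Everything here is illustrative: BumpAllocator, try_alloc, and refill_and_alloc are hypothetical names, std::malloc and std::abort stand in for MmapOrDie, and std::mutex replaces the StaticSpinMutex:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <mutex>

// Hypothetical stand-in for PersistentAllocator; blocks are never freed.
class BumpAllocator {
 public:
  // Fast path, meant to stay cheap enough to inline at call sites.
  void *alloc(std::size_t size) {
    if (void *p = try_alloc(size)) return p;
    return refill_and_alloc(size);  // cold, out-of-line slow path
  }

 private:
  // Lock-free bump allocation over the current block.
  void *try_alloc(std::size_t size) {
    std::uintptr_t pos = pos_.load(std::memory_order_acquire);
    for (;;) {
      if (pos == 0) return nullptr;  // no block yet, or a refill is underway
      if (pos + size > end_.load(std::memory_order_acquire)) return nullptr;
      if (pos_.compare_exchange_weak(pos, pos + size,
                                     std::memory_order_acquire))
        return reinterpret_cast<void *>(pos);
    }
  }

  void *refill_and_alloc(std::size_t size);  // defined out of line below

  std::mutex mtx_;
  std::atomic<std::uintptr_t> pos_{0};
  std::atomic<std::uintptr_t> end_{0};
};

// Slow path: serialize refills, retry, and map a fresh block on failure.
void *BumpAllocator::refill_and_alloc(std::size_t size) {
  std::lock_guard<std::mutex> lock(mtx_);
  for (;;) {
    if (void *p = try_alloc(size)) return p;   // another thread refilled first
    pos_.store(0, std::memory_order_relaxed);  // park concurrent fast paths
    std::size_t block = size > 64 * 1024 ? size : 64 * 1024;
    auto mem = reinterpret_cast<std::uintptr_t>(std::malloc(block));
    if (mem == 0) std::abort();  // mirrors the "or die" behavior of MmapOrDie
    end_.store(mem + block, std::memory_order_release);
    pos_.store(mem, std::memory_order_release);
  }
}
```

Keeping refill_and_alloc out of line means call sites that inline alloc only pull in the cheap compare-exchange attempt; the lock, the retry loop, and the block mapping sit behind a single call, which is the point of the commit.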