[NFC][sanitizer] Move inline implementation of StackStore into cpp

This commit is contained in:
Vitaly Buka 2021-11-15 13:31:45 -08:00
parent 368a3b5221
commit 6bf71be9f9
6 changed files with 81 additions and 59 deletions

View File

@ -73,6 +73,7 @@ set(SANITIZER_COVERAGE_SOURCES
set(SANITIZER_SYMBOLIZER_SOURCES
sanitizer_allocator_report.cpp
sanitizer_chained_origin_depot.cpp
sanitizer_stack_store.cpp
sanitizer_stackdepot.cpp
sanitizer_stacktrace.cpp
sanitizer_stacktrace_libcdep.cpp

View File

@ -0,0 +1,76 @@
//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() nor realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//
#include "sanitizer_stack_store.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
uptr *StackStore::tryAlloc(uptr count) {
  // Optimistic lock-free allocation: essentially try to bump region_pos by
  // `count` words. Returns nullptr when no region is mapped yet
  // (region_pos == 0) or the current region has too little space left; the
  // caller then falls back to refillAndAlloc().
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    uptr size = count * sizeof(uptr);
    // cmp == 0 means there is no usable region (not yet mapped, or a refill
    // is in progress); cmp + size > end means the region cannot fit `count`
    // words.
    if (cmp == 0 || cmp + size > end)
      return nullptr;
    // On CAS failure another thread bumped the pointer first; `cmp` has been
    // reloaded with the current value, so simply retry.
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return reinterpret_cast<uptr *>(cmp);
  }
}
uptr *StackStore::alloc(uptr count) {
  // Fast path: bump the current region pointer without taking the lock;
  // fall back to the locked slow path that maps a fresh superblock.
  uptr *ptr = tryAlloc(count);
  return LIKELY(ptr) ? ptr : refillAndAlloc(count);
}
uptr *StackStore::refillAndAlloc(uptr count) {
  // Slow path: serialize refills under the mutex, retry the lock-free fast
  // path, and map a new superblock if the retry still fails.
  SpinMutexLock l(&mtx);
  for (;;) {
    // Another thread may have refilled while we were waiting for the lock.
    uptr *s = tryAlloc(count);
    if (s)
      return s;
    // Zero region_pos so concurrent tryAlloc() calls fail fast while the new
    // superblock is being set up.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    // Reserve room for the BlockInfo trailer that records this mapping.
    uptr size = count * sizeof(uptr) + sizeof(BlockInfo);
    uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    // The BlockInfo lives at the very end of the mapped block; link it into
    // the list of mappings so TestOnlyUnmap() can release everything.
    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
    new_block->next = curr;
    new_block->ptr = mem;
    new_block->size = allocsz;
    curr = new_block;
    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
    // Exclude the trailer from the allocatable range.
    allocsz -= sizeof(BlockInfo);
    // Publish region_end before region_pos: tryAlloc() treats a nonzero
    // region_pos as "region usable", so the end pointer must be visible
    // (release/acquire pairing) by the time the position is.
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
void StackStore::TestOnlyUnmap() {
while (curr) {
uptr mem = curr->ptr;
uptr allocsz = curr->size;
curr = curr->next;
UnmapOrDie((void *)mem, allocsz);
}
internal_memset(this, 0, sizeof(*this));
}
} // namespace __sanitizer

View File

@ -14,7 +14,6 @@
#define SANITIZER_STACK_STORE_H
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
@ -43,63 +42,6 @@ class StackStore {
const BlockInfo *curr;
};
inline uptr *StackStore::tryAlloc(uptr count) {
// Optimisic lock-free allocation, essentially try to bump the region ptr.
for (;;) {
uptr cmp = atomic_load(&region_pos, memory_order_acquire);
uptr end = atomic_load(&region_end, memory_order_acquire);
uptr size = count * sizeof(uptr);
if (cmp == 0 || cmp + size > end)
return nullptr;
if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
memory_order_acquire))
return reinterpret_cast<uptr *>(cmp);
}
}
inline uptr *StackStore::alloc(uptr count) {
// First, try to allocate optimisitically.
uptr *s = tryAlloc(count);
if (LIKELY(s))
return s;
return refillAndAlloc(count);
}
inline uptr *StackStore::refillAndAlloc(uptr count) {
// If failed, lock, retry and alloc new superblock.
SpinMutexLock l(&mtx);
for (;;) {
uptr *s = tryAlloc(count);
if (s)
return s;
atomic_store(&region_pos, 0, memory_order_relaxed);
uptr size = count * sizeof(uptr) + sizeof(BlockInfo);
uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
new_block->next = curr;
new_block->ptr = mem;
new_block->size = allocsz;
curr = new_block;
atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
allocsz -= sizeof(BlockInfo);
atomic_store(&region_end, mem + allocsz, memory_order_release);
atomic_store(&region_pos, mem, memory_order_release);
}
}
inline void StackStore::TestOnlyUnmap() {
while (curr) {
uptr mem = curr->ptr;
uptr allocsz = curr->size;
curr = curr->next;
UnmapOrDie((void *)mem, allocsz);
}
internal_memset(this, 0, sizeof(*this));
}
} // namespace __sanitizer
#endif // SANITIZER_STACK_STORE_H
#endif // SANITIZER_STACK_STORE_H

View File

@ -25,6 +25,7 @@ type ^
..\rtl\tsan_platform_windows.cpp ^
..\..\sanitizer_common\sanitizer_win.cpp ^
..\..\sanitizer_common\sanitizer_deadlock_detector1.cpp ^
..\..\sanitizer_common\sanitizer_stack_store.cpp ^
..\..\sanitizer_common\sanitizer_stackdepot.cpp ^
..\..\sanitizer_common\sanitizer_flag_parser.cpp ^
..\..\sanitizer_common\sanitizer_symbolizer.cpp ^

View File

@ -31,6 +31,7 @@ SRCS="
../../sanitizer_common/sanitizer_printf.cpp
../../sanitizer_common/sanitizer_suppressions.cpp
../../sanitizer_common/sanitizer_thread_registry.cpp
../../sanitizer_common/sanitizer_stack_store.cpp
../../sanitizer_common/sanitizer_stackdepot.cpp
../../sanitizer_common/sanitizer_stacktrace.cpp
../../sanitizer_common/sanitizer_symbolizer.cpp

View File

@ -114,6 +114,7 @@ source_set("sources") {
"sanitizer_report_decorator.h",
"sanitizer_ring_buffer.h",
"sanitizer_solaris.cpp",
"sanitizer_stack_store.cpp",
"sanitizer_stack_store.h",
"sanitizer_stackdepot.cpp",
"sanitizer_stackdepot.h",