//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store the LargeMmapAllocator chunks list, limited to 32K
// total allocated chunks. To be used in memory-constrained cases or when large
// allocations are rare (currently, 32-bit targets and the internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
 public:
  INLINE void *Init() { return &p_[0]; }
  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};
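
// A sketch of how the PtrArray contract is consumed (illustrative only; the
// names ptr_array, n_chunks and new_chunk_header are examples, mirroring the
// way LargeMmapAllocator below uses either implementation):
//   void **chunks = reinterpret_cast<void **>(ptr_array.Init());
//   ptr_array.EnsureSpace(n_chunks);  // Must be called before writing slot n.
//   chunks[n_chunks++] = new_chunk_header;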

// A much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and able to hold up to 1M
// chunks. ReservedAddressRange is used instead of plain MAP_NORESERVE to
// provide the same functionality on Fuchsia, which does not support
// MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  INLINE void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

  INLINE void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};
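
// Worked sizing, assuming a 64-bit target where sizeof(uptr) == 8 (a note
// added for clarity, not part of the original interface): Init() reserves
// kMaxNumChunks * 8 bytes = 8 MB of address space up front, and EnsureSpace()
// commits it on demand in kChunksBlockCount * 8 bytes = 128 KB blocks.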

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
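//
// A minimal usage sketch (an illustration assuming the default template
// parameters; in-tree this class is normally driven through a combined
// allocator rather than used directly):
//   static LargeMmapAllocator<> secondary;
//   secondary.Init();
//   AllocatorStats stats;
//   stats.Init();
//   void *p = secondary.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/8);
//   CHECK(secondary.PointerIsMine(p));
//   secondary.Deallocate(&stats, p);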
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }
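
  // Layout of a single chunk when alignment <= page_size_ (a descriptive note
  // summarizing the code above, not taken from the original comments):
  //   map_beg               -- start of the mapping; the Header lives here,
  //                            followed by the metadata returned by
  //                            GetMetaData().
  //   map_beg + page_size_  -- user pointer returned to the caller.
  //   ...                   -- user data, then rounding slack up to
  //   map_beg + map_size    -- end of the mapping.
  // With alignment > page_size_ the user pointer is aligned up and the Header
  // occupies the page immediately preceding it.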

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }
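
  // For example (an explanatory note, assuming a 4096-byte page and the
  // four-word Header defined below): the metadata region spans from the end
  // of the Header to the end of the header page, i.e.
  // 4096 - 4 * sizeof(uptr) bytes, which exceeds the page_size_/2 bound
  // promised above.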

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }
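
  // A usage sketch for the locked fast path (illustrative; "allocator" and
  // "ptr" below are example caller-side names, not part of this header):
  //   allocator.ForceLock();
  //   void *beg = allocator.GetBlockBeginFastLocked(ptr);
  //   allocator.ForceUnlock();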

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }
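
  // A sketch of a caller (illustrative only; it assumes the usual
  // ForEachChunkCallback shape of (uptr chunk, void *arg) and a hypothetical
  // CountChunks helper):
  //   static void CountChunks(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   ...
  //   uptr n = 0;
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(CountChunks, &n);
  //   allocator.ForceUnlock();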

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  StaticSpinMutex mutex_;
};