//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches, then
// evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
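  // For illustration (the pointers and sizes are hypothetical): after
  // init(p, 100) followed by push_back(q, 200), size is
  // 300 + sizeof(QuarantineBatch), so quarantined_size() returns 300,
  // i.e. only the user memory held by this batch.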
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
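//
// As an illustrative sketch only (FlushCallback, ChunkHeader and the helper
// calls below are hypothetical and not provided by this header), a tool-side
// callback and typical use might look like:
//
//   struct FlushCallback {
//     void Recycle(ChunkHeader *ptr) { ReturnChunkToAllocator(ptr); }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };
//
//   Quarantine<FlushCallback, ChunkHeader> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/1 << 28, /*cache_size=*/1 << 20);
//   // On every deallocation:
//   quarantine.Put(GetCurrentThreadCache(), FlushCallback(), chunk, chunk_size);
//   // On thread destruction:
//   quarantine.Drain(GetCurrentThreadCache(), FlushCallback());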
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // Thread local quarantine size can be zero only when global quarantine
    // size is zero (it allows us to perform just one atomic read per Put()
    // call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // It assumes that the world is stopped, just as the allocator's PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (since the memory they use is
      // counted against the quarantine limit) can exceed the actual user
      // quarantined chunks, which diminishes the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us a merge attempt when the quarantine
      // is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
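      // With cache_size == quarantined user bytes + overhead_size, the check
      // below rearranges to overhead_size * 100 > user bytes *
      // kOverheadThresholdPercents, so with the default of 100 batches are
      // merged once their overhead exceeds the user memory they track.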
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H