//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
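// (On a 64-bit target the three header words plus kSize pointers come to
// exactly (3 + 1021) * 8 = 8192 bytes, so the batch fills the 8Kb budget;
// on a 32-bit target it is 4096 bytes.)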

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
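//
// A minimal sketch of a conforming Callback (illustrative only; the names
// ExampleCallback, my_alloc, and my_free are assumptions, not part of this
// header):
//
//   struct ExampleCallback {
//     // Finally release a chunk once it leaves the quarantine.
//     void Recycle(Node *ptr) { my_free(ptr); }
//     // Allocate/Deallocate manage storage for QuarantineBatch itself.
//     void *Allocate(uptr size) { return my_alloc(size); }
//     void Deallocate(void *ptr) { my_free(ptr); }
//   };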
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }
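
  // Illustrative numbers only: after Init(256 << 20, 1 << 20) the quarantine
  // holds up to ~256Mb globally, recycles the oldest memory whenever the
  // global queue grows past that limit until it is back under 90% of it,
  // and lets each thread buffer up to 1Mb locally before draining.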

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }
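  // Note: Drain acquires recycle_mutex_ with TryLock, so at most one thread
  // recycles at a time; contenders skip the slow recycle pass and rely on
  // the winner to bring the queue back under the limit. The lock is released
  // in Recycle().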

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }
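
  // Recycles the drained batches outside of any lock, prefetching quarantined
  // chunks a fixed distance ahead to hide cache misses when Recycle touches
  // their headers.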
  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      for (uptr i = 0; i < kPrefetch && i < b->count; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        // Prefetch ahead, staying within the populated part of batch[].
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
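
// Usage sketch (illustrative only; ExampleCallback is the hypothetical
// callback sketched above, and the sizes are arbitrary):
//
//   typedef Quarantine<ExampleCallback, Node> NodeQuarantine;
//   static NodeQuarantine quarantine(LINKER_INITIALIZED);
//   static NodeQuarantine::Cache thread_cache(LINKER_INITIALIZED);
//   ...
//   quarantine.Init(256 << 20, 1 << 20);
//   // On deallocation, stash the chunk instead of freeing it:
//   quarantine.Put(&thread_cache, ExampleCallback(), node, node_size);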

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;
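
  // size_ is written only by the owning thread (or under cache_mutex_ for
  // the global cache), but Quarantine::Drain reads the global cache's Size()
  // without holding the mutex; relaxed atomics keep that racy read
  // well-defined, and a slightly stale value is acceptable there.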

  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H