//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"
|
|
|
|
|
|
|
|
namespace scudo {
|
|
|
|
|
|
|
|
struct QuarantineBatch {
|
|
|
|
// With the following count, a batch (and the header that protects it) occupy
|
|
|
|
// 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
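  // As a rough layout sketch (assuming 8-byte pointers and natural alignment
  // on 64-bit): Next (8) + Size (8) + Count (4, padded to 8) + 1019 * 8-byte
  // slots = 8176 bytes; on 32-bit: 12 + 1019 * 4 = 4088 bytes. Both leave
  // room for the allocator's chunk header within the 8192/4096 byte budget.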
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }

  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }

  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }

  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));

    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();

    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }

  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8KiB.

// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  void initLinkerInitialized() {}
  void init() {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized();
  }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
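  // For instance, a cache holding N batches carries an overhead of
  // N * sizeof(QuarantineBatch), roughly 8 KiB per batch on 64-bit, on top of
  // the quarantined user bytes.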
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to the deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size;

  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};

// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
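//
// A minimal sketch of a conforming callback and its use (illustrative only;
// the ExampleCallback/ExampleNode names and the variables below are
// hypothetical and not part of this header):
//
//   struct ExampleCallback {
//     void recycle(ExampleNode *Ptr);  // Return a chunk to the allocator.
//     void *allocate(uptr Size);       // Provide memory for a batch.
//     void deallocate(void *Ptr);      // Release a drained batch.
//   };
//
//   GlobalQuarantine<ExampleCallback, ExampleNode> Quarantine;
//   Quarantine.init(/*Size=*/1 << 18, /*CacheSize=*/1 << 16);
//   // On deallocation, chunks are put in quarantine rather than recycled
//   // immediately; they come back through ExampleCallback::recycle() once
//   // the quarantine exceeds its maximum size.
//   Quarantine.put(&ThreadLocalCache, ExampleCallback(), Chunk, ChunkSize);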
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;

  void initLinkerInitialized(uptr Size, uptr CacheSize) {
    // Thread local quarantine size can be zero only when global quarantine
    // size is zero (it allows us to perform just one atomic read per put()
    // call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
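    // MinSize is the level recycle() drains the global cache down to; keeping
    // it at 90% of MaxSize leaves some leeway for newly quarantined chunks.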
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.initLinkerInitialized();
  }
  void init(uptr Size, uptr CacheSize) {
    CacheMutex.init();
    Cache.init();
    RecycleMutex.init();
    MinSize = {};
    MaxSize = {};
    MaxCacheSize = {};
    initLinkerInitialized(Size, CacheSize);
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
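
  // A chunk enters the quarantine through put(): it is appended to the
  // caller's per-thread cache, which is drained into the global cache once it
  // grows beyond the cache size limit; the global cache is in turn recycled
  // through the Callback once it exceeds the maximum quarantine size.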
  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  void NOINLINE drain(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) const {
    // This assumes that the world is stopped, as does the allocator's
    // printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() {
    // RecycleMutex must be locked first, since recycle() grabs CacheMutex.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache;
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize;
  atomic_uptr MaxSize;
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;

  void NOINLINE recycle(uptr MinSize, Callback Cb) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can end up using more memory than the
      // user's quarantined chunks, which defeats the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). This spares us a merge attempt
      // when the quarantine is unlikely to contain batches suitable for
      // merging.
      constexpr uptr OverheadThresholdPercents = 100;
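      // With OverheadThresholdPercents == 100, the condition below reduces
      // to OverheadSize > CacheSize / 2, i.e. the batch overhead exceeding
      // the quarantined user bytes themselves.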
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};

} // namespace scudo

#endif // SCUDO_QUARANTINE_H_