Revert "sanitizer_common: optimize memory drain"

Breaks https://lab.llvm.org/buildbot/#/builders/sanitizer-windows

This reverts commit d89d3dfae1.
parent 7efe388785
commit ba8dcaef0d
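Editorial note: the reverted change (d89d3dfae1) had SizeClassAllocator64LocalCache::Drain construct one MemoryMapper up front and thread it through ReturnToAllocator and MaybeReleaseToOS, so page releases and their statistics were batched across size classes. The diff below restores the older shape, where MaybeReleaseToOS builds a short-lived, per-class MemoryMapper itself. The toy program that follows is only a sketch of that structural difference; ToyMapper, DrainShared, and DrainPerClass are stand-ins invented here, not sanitizer_common APIs.

#include <cstdio>

// ToyMapper stands in for the allocator's MemoryMapper: it only counts how
// many page ranges were "released".
struct ToyMapper {
  int ranges = 0;
  void ReleasePageRange() { ++ranges; }
};

constexpr int kNumClasses = 4;

// Shape of the reverted optimization: one mapper is shared by the whole
// drain, so stats are aggregated once at the end.
int DrainShared() {
  ToyMapper mapper;
  for (int class_id = 1; class_id < kNumClasses; class_id++)
    mapper.ReleasePageRange();
  return mapper.ranges;
}

// Shape of the restored code: a fresh mapper per size class, with its stats
// folded into that class's region right away.
int DrainPerClass() {
  int total = 0;
  for (int class_id = 1; class_id < kNumClasses; class_id++) {
    ToyMapper mapper;
    mapper.ReleasePageRange();
    total += mapper.ranges;
  }
  return total;
}

int main() {
  std::printf("shared mapper: %d ranges, per-class mappers: %d ranges\n",
              DrainShared(), DrainPerClass());
  return 0;
}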
@@ -17,7 +17,6 @@
 template <class SizeClassAllocator>
 struct SizeClassAllocator64LocalCache {
   typedef SizeClassAllocator Allocator;
-  typedef MemoryMapper<Allocator> MemoryMapperT;
 
   void Init(AllocatorGlobalStats *s) {
     stats_.Init();
@@ -54,7 +53,7 @@ struct SizeClassAllocator64LocalCache {
     PerClass *c = &per_class_[class_id];
     InitCache(c);
     if (UNLIKELY(c->count == c->max_count))
-      Drain(c, allocator, class_id);
+      Drain(c, allocator, class_id, c->max_count / 2);
     CompactPtrT chunk = allocator->PointerToCompactPtr(
         allocator->GetRegionBeginBySizeClass(class_id),
         reinterpret_cast<uptr>(p));
@@ -63,10 +62,10 @@ struct SizeClassAllocator64LocalCache {
   }
 
   void Drain(SizeClassAllocator *allocator) {
-    MemoryMapperT memory_mapper(*allocator);
     for (uptr i = 1; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
+      while (c->count > 0)
+        Drain(c, allocator, i, c->count);
     }
   }
 
@@ -107,18 +106,12 @@ struct SizeClassAllocator64LocalCache {
     return true;
   }
 
-  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
-                      uptr class_id) {
-    MemoryMapperT memory_mapper(*allocator);
-    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
-  }
-
-  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
-             SizeClassAllocator *allocator, uptr class_id, uptr count) {
+  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+                      uptr count) {
     CHECK_GE(c->count, count);
     const uptr first_idx_to_drain = c->count - count;
     c->count -= count;
-    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
+    allocator->ReturnToAllocator(&stats_, class_id,
                                  &c->chunks[first_idx_to_drain], count);
   }
 };
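Editorial note on the hunks above (SizeClassAllocator64LocalCache): the restored four-argument Drain removes `count` chunks from the top of the per-class cache and returns them in a single ReturnToAllocator call, and the allocation slow path passes c->max_count / 2. The standalone sketch below mirrors only that bookkeeping; ToyCache and the free ReturnToAllocator function are invented stand-ins, not the real allocator types.

#include <cassert>
#include <cstdio>

// Fixed-capacity per-class cache, loosely modeled on PerClass.
struct ToyCache {
  static constexpr unsigned max_count = 8;
  unsigned chunks[max_count];
  unsigned count = 0;
};

// Stand-in for SizeClassAllocator64::ReturnToAllocator.
static void ReturnToAllocator(const unsigned *chunks, unsigned n) {
  std::printf("returning %u chunks, first value %u\n", n, chunks[0]);
}

// Mirrors the restored Drain(c, allocator, class_id, count) bookkeeping:
// drop the top `count` entries and hand them back in one batch.
static void Drain(ToyCache *c, unsigned count) {
  assert(c->count >= count);  // CHECK_GE(c->count, count) in the real code
  const unsigned first_idx_to_drain = c->count - count;
  c->count -= count;
  ReturnToAllocator(&c->chunks[first_idx_to_drain], count);
}

int main() {
  ToyCache c;
  for (unsigned i = 0; i < ToyCache::max_count; i++)
    c.chunks[c.count++] = 100 + i;
  Drain(&c, ToyCache::max_count / 2);  // the hot path drains max_count / 2
  std::printf("chunks left in cache: %u\n", c.count);
  return 0;
}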
@@ -42,60 +42,6 @@ struct SizeClassAllocator64FlagMasks { // Bit masks.
   };
 };
 
-template <typename Allocator>
-class MemoryMapper {
- public:
-  typedef typename Allocator::CompactPtrT CompactPtrT;
-
-  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
-
-  ~MemoryMapper() {
-    if (buffer_)
-      UnmapOrDie(buffer_, buffer_size_);
-  }
-
-  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
-    ranges = released_ranges_count_;
-    released_ranges_count_ = 0;
-    bytes = released_bytes_;
-    released_bytes_ = 0;
-    return ranges != 0;
-  }
-
-  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
-    // TODO(alekseyshl): The idea to explore is to check if we have enough
-    // space between num_freed_chunks*sizeof(CompactPtrT) and
-    // mapped_free_array to fit buffer_size bytes and use that space instead
-    // of mapping a temporary one.
-    if (buffer_size_ < buffer_size) {
-      if (buffer_)
-        UnmapOrDie(buffer_, buffer_size_);
-      buffer_ = MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
-      buffer_size_ = buffer_size;
-    } else {
-      internal_memset(buffer_, 0, buffer_size);
-    }
-    return buffer_;
-  }
-
-  // Releases [from, to) range of pages back to OS.
-  void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to, uptr class_id) {
-    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
-    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
-    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
-    ReleaseMemoryPagesToOS(from_page, to_page);
-    released_ranges_count_++;
-    released_bytes_ += to_page - from_page;
-  }
-
- private:
-  const Allocator &allocator_;
-  uptr released_ranges_count_ = 0;
-  uptr released_bytes_ = 0;
-  void *buffer_ = nullptr;
-  uptr buffer_size_ = 0;
-};
-
 template <class Params>
 class SizeClassAllocator64 {
  public:
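Editorial note: the class removed above is the free-standing MemoryMapper<Allocator> introduced by the optimization. It kept one counter buffer alive across calls, remapping only when it had to grow and clearing it otherwise, and reported releases through GetAndResetStats. The nested per-class MemoryMapper that replaces it, re-added further down in this diff, maps a fresh buffer for every PackedCounterArray and exposes plain getters. A heap-backed sketch of just that buffer-reuse difference follows; malloc/calloc/free stand in for MmapOrDieOnFatalError/UnmapOrDie, and both type names are invented.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Shape of the removed MemoryMapper<Allocator>: owns one buffer, reuses it.
struct ReusingMapper {
  void *buffer = nullptr;
  size_t buffer_size = 0;

  void *MapCounterBuffer(size_t size) {
    if (buffer_size < size) {
      std::free(buffer);              // UnmapOrDie in the real code
      buffer = std::calloc(1, size);  // MmapOrDieOnFatalError (zeroed pages)
      buffer_size = size;
    } else {
      std::memset(buffer, 0, size);   // reuse the old mapping, just clear it
    }
    return buffer;
  }

  ~ReusingMapper() { std::free(buffer); }
};

// Shape of the restored nested MemoryMapper: map per call, unmap explicitly
// (the PackedCounterArray destructor calls UnmapPackedCounterArrayBuffer).
struct PerCallMapper {
  void *MapCounterBuffer(size_t size) { return std::calloc(1, size); }
  void UnmapCounterBuffer(void *p, size_t /*size*/) { std::free(p); }
};

int main() {
  ReusingMapper r;
  void *a = r.MapCounterBuffer(64);
  void *b = r.MapCounterBuffer(32);   // smaller request: same buffer comes back
  std::printf("reusing mapper returned the same buffer: %s\n",
              a == b ? "yes" : "no");

  PerCallMapper p;
  void *c = p.MapCounterBuffer(64);
  p.UnmapCounterBuffer(c, 64);        // caller owns the lifetime
  return 0;
}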
@@ -111,7 +57,6 @@ class SizeClassAllocator64 {
 
   typedef SizeClassAllocator64<Params> ThisT;
   typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
-  typedef MemoryMapper<ThisT> MemoryMapperT;
 
   // When we know the size class (the region base) we can represent a pointer
   // as a 4-byte integer (offset from the region start shifted right by 4).
@@ -175,10 +120,9 @@ class SizeClassAllocator64 {
   }
 
   void ForceReleaseToOS() {
-    MemoryMapperT memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
-      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
+      MaybeReleaseToOS(class_id, true /*force*/);
     }
   }
 
@@ -187,8 +131,7 @@ class SizeClassAllocator64 {
            alignment <= SizeClassMap::kMaxSize;
   }
 
-  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
-                                  AllocatorStats *stat, uptr class_id,
+  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                                   const CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -211,7 +154,7 @@ class SizeClassAllocator64 {
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
+    MaybeReleaseToOS(class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -419,10 +362,10 @@ class SizeClassAllocator64 {
   // For the performance sake, none of the accessors check the validity of the
   // arguments, it is assumed that index is always in [0, n) range and the value
   // is not incremented past max_value.
-  template <typename MemoryMapper>
+  template<class MemoryMapperT>
   class PackedCounterArray {
    public:
-    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
+    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
         : n(num_counters), memory_mapper(mapper) {
       CHECK_GT(num_counters, 0);
       CHECK_GT(max_value, 0);
@@ -446,6 +389,11 @@ class SizeClassAllocator64 {
       buffer = reinterpret_cast<u64*>(
           memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
     }
+    ~PackedCounterArray() {
+      if (buffer) {
+        memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
+      }
+    }
 
     bool IsAllocated() const {
       return !!buffer;
@@ -482,21 +430,18 @@ class SizeClassAllocator64 {
     u64 packing_ratio_log;
     u64 bit_offset_mask;
 
-    MemoryMapper *const memory_mapper;
+    MemoryMapperT* const memory_mapper;
     u64 buffer_size;
     u64* buffer;
   };
 
-  template <class MemoryMapperT>
+  template<class MemoryMapperT>
   class FreePagesRangeTracker {
    public:
-    explicit FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
+    explicit FreePagesRangeTracker(MemoryMapperT* mapper)
         : memory_mapper(mapper),
-          class_id(class_id),
           page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
-          in_the_range(false),
-          current_page(0),
-          current_range_start_page(0) {}
+          in_the_range(false), current_page(0), current_range_start_page(0) {}
 
     void NextPage(bool freed) {
       if (freed) {
@@ -518,14 +463,13 @@ class SizeClassAllocator64 {
     void CloseOpenedRange() {
       if (in_the_range) {
         memory_mapper->ReleasePageRangeToOS(
-            class_id, current_range_start_page << page_size_scaled_log,
+            current_range_start_page << page_size_scaled_log,
             current_page << page_size_scaled_log);
         in_the_range = false;
       }
     }
 
-    MemoryMapperT *const memory_mapper;
-    const uptr class_id;
+    MemoryMapperT* const memory_mapper;
     const uptr page_size_scaled_log;
     bool in_the_range;
     uptr current_page;
@@ -536,12 +480,11 @@ class SizeClassAllocator64 {
   // chunks only and returns these pages back to OS.
   // allocated_pages_count is the total number of pages allocated for the
   // current bucket.
-  template <typename MemoryMapper>
+  template<class MemoryMapperT>
   static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                     uptr free_array_count, uptr chunk_size,
                                     uptr allocated_pages_count,
-                                    MemoryMapper *memory_mapper,
-                                    uptr class_id) {
+                                    MemoryMapperT *memory_mapper) {
     const uptr page_size = GetPageSizeCached();
 
     // Figure out the number of chunks per page and whether we can take a fast
@@ -577,8 +520,9 @@ class SizeClassAllocator64 {
       UNREACHABLE("All chunk_size/page_size ratios must be handled.");
     }
 
-    PackedCounterArray<MemoryMapper> counters(
-        allocated_pages_count, full_pages_chunk_count_max, memory_mapper);
+    PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
+                                               full_pages_chunk_count_max,
+                                               memory_mapper);
     if (!counters.IsAllocated())
       return;
 
@@ -603,7 +547,7 @@ class SizeClassAllocator64 {
 
     // Iterate over pages detecting ranges of pages with chunk counters equal
     // to the expected number of chunks for the particular page.
-    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
+    FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
     if (same_chunk_count_per_page) {
       // Fast path, every page has the same number of chunks affecting it.
       for (uptr i = 0; i < counters.GetCount(); i++)
@@ -642,7 +586,7 @@ class SizeClassAllocator64 {
   }
 
  private:
-  friend class MemoryMapper<ThisT>;
+  friend class MemoryMapper;
 
   ReservedAddressRange address_range;
 
@@ -876,13 +820,57 @@ class SizeClassAllocator64 {
     return true;
   }
 
+  class MemoryMapper {
+   public:
+    MemoryMapper(const ThisT& base_allocator, uptr class_id)
+        : allocator(base_allocator),
+          region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
+          released_ranges_count(0),
+          released_bytes(0) {
+    }
+
+    uptr GetReleasedRangesCount() const {
+      return released_ranges_count;
+    }
+
+    uptr GetReleasedBytes() const {
+      return released_bytes;
+    }
+
+    void *MapPackedCounterArrayBuffer(uptr buffer_size) {
+      // TODO(alekseyshl): The idea to explore is to check if we have enough
+      // space between num_freed_chunks*sizeof(CompactPtrT) and
+      // mapped_free_array to fit buffer_size bytes and use that space instead
+      // of mapping a temporary one.
+      return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
+    }
+
+    void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
+      UnmapOrDie(buffer, buffer_size);
+    }
+
+    // Releases [from, to) range of pages back to OS.
+    void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
+      const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
+      const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
+      ReleaseMemoryPagesToOS(from_page, to_page);
+      released_ranges_count++;
+      released_bytes += to_page - from_page;
+    }
+
+   private:
+    const ThisT& allocator;
+    const uptr region_base;
+    uptr released_ranges_count;
+    uptr released_bytes;
+  };
+
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
   //
   // TODO(morehouse): Support a callback on memory release so HWASan can release
   // aliases as well.
-  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
-                        bool force) {
+  void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -906,16 +894,17 @@ class SizeClassAllocator64 {
       }
     }
 
-    ReleaseFreeMemoryToOS(
-        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
-        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
-        class_id);
+    MemoryMapper memory_mapper(*this, class_id);
 
-    uptr ranges, bytes;
-    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
+    ReleaseFreeMemoryToOS<MemoryMapper>(
+        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
+        RoundUpTo(region->allocated_user, page_size) / page_size,
+        &memory_mapper);
+
+    if (memory_mapper.GetReleasedRangesCount() > 0) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
-      region->rtoi.num_releases += ranges;
-      region->rtoi.last_released_bytes = bytes;
+      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
+      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
     }
     region->rtoi.last_release_at_ns = MonotonicNanoTime();
   }
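Editorial note: in the restored MaybeReleaseToOS above, the mapper lives only for the duration of one call; its released-range and released-byte counters are read back through GetReleasedRangesCount()/GetReleasedBytes() and folded into the region's release-to-OS statistics. Under the reverted scheme the mapper outlived the call and GetAndResetStats drained the counters instead. The sketch below mirrors only the restored read-back; RegionStats and ToyMapper are invented for illustration, not the real RegionInfo/rtoi types.

#include <cstdio>

// Invented stand-in: the real code updates region->rtoi fields.
struct RegionStats {
  unsigned num_releases = 0;
  unsigned last_released_bytes = 0;
};

struct ToyMapper {
  unsigned released_ranges_count = 0;
  unsigned released_bytes = 0;
  void ReleasePageRange(unsigned bytes) {
    released_ranges_count++;
    released_bytes += bytes;
  }
  unsigned GetReleasedRangesCount() const { return released_ranges_count; }
  unsigned GetReleasedBytes() const { return released_bytes; }
};

// Mirrors the shape of the restored MaybeReleaseToOS: construct the mapper,
// do the release work, then fold its stats into the region.
static void MaybeRelease(RegionStats *region) {
  ToyMapper memory_mapper;
  memory_mapper.ReleasePageRange(4096);  // pretend one page range was freed
  if (memory_mapper.GetReleasedRangesCount() > 0) {
    region->num_releases += memory_mapper.GetReleasedRangesCount();
    region->last_released_bytes = memory_mapper.GetReleasedBytes();
  }
}

int main() {
  RegionStats region;
  MaybeRelease(&region);
  std::printf("releases: %u, last released bytes: %u\n",
              region.num_releases, region.last_released_bytes);
  return 0;
}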
@@ -1243,7 +1243,7 @@ class RangeRecorder {
             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
         last_page_reported(0) {}
 
-  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
+  void ReleasePageRangeToOS(u32 from, u32 to) {
     from >>= page_size_scaled_log;
     to >>= page_size_scaled_log;
     ASSERT_LT(from, to);
@@ -1253,7 +1253,6 @@ class RangeRecorder {
     reported_pages.append(to - from, 'x');
     last_page_reported = to;
   }
-
  private:
   const uptr page_size_scaled_log;
   u32 last_page_reported;
@@ -1283,7 +1282,7 @@ TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
 
   for (auto test_case : test_cases) {
     RangeRecorder range_recorder;
-    RangeTracker tracker(&range_recorder, 1);
+    RangeTracker tracker(&range_recorder);
     for (int i = 0; test_case[i] != 0; i++)
       tracker.NextPage(test_case[i] == 'x');
     tracker.Done();
@@ -1309,7 +1308,7 @@ class ReleasedPagesTrackingMemoryMapper {
     free(buffer);
   }
 
-  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
+  void ReleasePageRangeToOS(u32 from, u32 to) {
     uptr page_size_scaled =
         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
     for (u32 i = from; i < to; i += page_size_scaled)
@@ -1353,7 +1352,7 @@ void TestReleaseFreeMemoryToOS() {
 
   Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                    chunk_size, kAllocatedPagesCount,
-                                   &memory_mapper, class_id);
+                                   &memory_mapper);
 
   // Verify that there are no released pages touched by used chunks and all
   // ranges of free chunks big enough to contain the entire memory pages had
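Editorial note: the test hunks above move the mock mappers (RangeRecorder, ReleasedPagesTrackingMemoryMapper) back to the two-argument ReleasePageRangeToOS(from, to) interface, since the size class is now bound at mapper construction time. A minimal sketch of that testing pattern, recording releases in a mock and asserting on them afterwards, follows; the names are invented here and are not the gtest fixtures from the file.

#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// Mock mapper in the spirit of ReleasedPagesTrackingMemoryMapper: it records
// every [from, to) range it is asked to release instead of touching the OS.
struct RecordingMapper {
  std::vector<std::pair<unsigned, unsigned>> ranges;
  void ReleasePageRangeToOS(unsigned from, unsigned to) {
    ranges.push_back({from, to});
  }
};

// The code under test would normally be the range tracker; here the mock is
// fed directly just to show the verification pattern.
int main() {
  RecordingMapper mapper;
  mapper.ReleasePageRangeToOS(0, 4096);
  mapper.ReleasePageRangeToOS(8192, 12288);

  assert(mapper.ranges.size() == 2);
  assert(mapper.ranges[0] == std::make_pair(0u, 4096u));
  std::printf("recorded %zu released ranges\n", mapper.ranges.size());
  return 0;
}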