//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <random>
#include <set>

using namespace __sanitizer;

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
// On Windows 64-bit there is no easy way to find a large enough fixed address
// space that is always available. Thus, a dynamically allocated address space
// is used instead (i.e. ~(uptr)0).
static const uptr kAllocatorSpace = ~(uptr)0;
static const uptr kAllocatorSize = 0x8000000000ULL;  // 512G
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#elif SANITIZER_ANDROID && defined(__aarch64__)
static const uptr kAllocatorSpace = 0x3000000000ULL;
static const uptr kAllocatorSize = 0x2000000000ULL;
static const u64 kAddressSpaceSize = 1ULL << 39;
typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#endif
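
// Each AP64* struct below bundles the compile-time parameters of a
// SizeClassAllocator64 instantiation: the base (kSpaceBeg) and size
// (kSpaceSize) of the allocator region, the per-chunk metadata size
// (kMetadataSize), the size-class map, the map/unmap callback, and the
// address-space view. A kSpaceBeg of ~(uptr)0 requests a dynamically mapped
// base instead of a fixed address (see the Windows note above).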
template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator Params. Short name for shorter demangled names.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dyn {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Compact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64VeryCompact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = 1ULL << 37;
  static const uptr kMetadataSize = 16;
  typedef VeryCompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dense {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef DenseSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};
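
// The aliases below pair SizeClassAllocator64 with each AP64* parameter
// struct. All of them are instantiated with LocalAddressSpaceView, i.e. the
// allocator inspects its own address space; the AddressSpaceView parameter
// exists so the same structures can also be read through a non-local view.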
template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64CompactASVT =
    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64VeryCompactASVT =
    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
using Allocator64VeryCompact =
    Allocator64VeryCompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;

#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
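
// Worked example (illustrative): on 32-bit targets kRegionSizeLog is 20, so
// the flat byte map has kAddressSpaceSize >> 20 = (1ULL << 32) >> 20 = 4096
// entries, one per 1 MiB region; on 64-bit targets regions are 16 MiB and the
// entry count scales with kAddressSpaceSize accordingly.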

template <typename AddressSpaceViewTy>
struct AP32Compact {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using Allocator32CompactASVT =
    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
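
// TestSizeClassMap() prints the size-class table of the given map and runs
// the map's own Validate() consistency checks; each TEST below instantiates
// it for one predefined map.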
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, VeryCompactSizeClassMap) {
  TestSizeClassMap<VeryCompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

TEST(SanitizerCommon, DenseSizeClassMap) {
  TestSizeClassMap<DenseSizeClassMap>();
}
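
// TestSizeClassAllocator() is the generic stress test shared by the
// allocator-specific TESTs below: it allocates chunks of many different sizes
// through a thread-local cache, checks GetBlockBegin/PointerIsMine/metadata
// round trips, verifies that total memory use stays stable across repeated
// allocate/deallocate cycles, and finally sweeps the address space to make
// sure GetBlockBegin never crashes on arbitrary pointers.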
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {
    1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
  };

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
  TestSizeClassAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
// FIXME(kostyak): find values so that those work on Android as well.
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dense) {
  TestSizeClassAllocator<Allocator64Dense>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
  TestSizeClassAllocator<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}
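
// Illustrative sketch (not part of the original test suite): the minimal
// allocate/deallocate round trip that the stress tests above build on. It
// uses only calls already exercised by TestSizeClassAllocator(); the 128-byte
// request size is an arbitrary choice for the example.
TEST(SanitizerCommon, SizeClassAllocator32CompactRoundTripExample) {
  Allocator32Compact *a = new Allocator32Compact;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator32Compact> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  // Map the request size to its size class, take one chunk from the cache,
  // and verify that the allocator agrees about ownership and block bounds.
  uptr class_id = Allocator32Compact::SizeClassMapT::ClassID(128);
  void *p = cache.Allocate(a, class_id);
  CHECK(a->PointerIsMine(p));
  CHECK_EQ(p, a->GetBlockBegin(p));
  CHECK_EQ(class_id, a->GetSizeClass(p));
  cache.Deallocate(a, class_id, p);
  a->TestOnlyUnmap();
  delete a;
}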

template <typename AddressSpaceViewTy>
struct AP32SeparateBatches {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef DefaultSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
template <typename AddressSpaceView>
using Allocator32SeparateBatchesASVT =
    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
using Allocator32SeparateBatches =
    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;

TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
  TestSizeClassAllocator<Allocator32SeparateBatches>();
}
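
// SizeClassAllocatorMetadataStress() allocates one chunk from every size
// class in round-robin order, records each chunk's metadata pointer, then
// checks that repeated GetMetaData() calls keep returning the same address
// before deallocating everything.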
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
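
// SizeClassAllocatorGetBlockBeginStress() keeps allocating chunks of the
// largest size class until roughly TotalSize bytes have been handed out,
// checking after each allocation that GetBlockBegin() returns the chunk
// itself, i.e. that the computation does not overflow near the top of the
// region.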
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= TotalSize / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
  // Does not have > 4Gb for each class.
  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
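
// TestMapUnmapCallback counts the map/unmap callbacks issued by the
// allocators; the MapUnmapCallback tests below reset the counters, trigger
// allocator activity, and compare the counts against the expected number of
// mappings.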
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64WithCallback {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
  // State + alloc + metadata + freearray.
  EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif
#endif
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
|
2017-05-15 22:47:19 +08:00
|
|
|
struct AP32WithCallback {
|
|
|
|
static const uptr kSpaceBeg = 0;
|
|
|
|
static const u64 kSpaceSize = kAddressSpaceSize;
|
|
|
|
static const uptr kMetadataSize = 16;
|
|
|
|
typedef CompactSizeClassMap SizeClassMap;
|
|
|
|
static const uptr kRegionSizeLog = ::kRegionSizeLog;
|
2018-12-14 17:03:18 +08:00
|
|
|
using AddressSpaceView = AddressSpaceViewTy;
|
|
|
|
using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
|
2017-05-15 22:47:19 +08:00
|
|
|
typedef TestMapUnmapCallback MapUnmapCallback;
|
|
|
|
static const uptr kFlags = 0;
|
|
|
|
};
|
|
|
|
|
2012-12-12 22:32:18 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
|
|
|
|
TestMapUnmapCallback::map_count = 0;
|
|
|
|
TestMapUnmapCallback::unmap_count = 0;
|
2018-12-14 17:03:18 +08:00
|
|
|
typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
|
2012-12-12 22:32:18 +08:00
|
|
|
Allocator32WithCallBack *a = new Allocator32WithCallBack;
|
2016-11-29 08:22:50 +08:00
|
|
|
a->Init(kReleaseToOSIntervalNever);
|
2013-05-20 15:29:21 +08:00
|
|
|
EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
|
2013-01-12 00:41:19 +08:00
|
|
|
SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
|
2013-01-24 17:08:03 +08:00
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
|
|
cache.Init(0);
|
|
|
|
AllocatorStats stats;
|
|
|
|
stats.Init();
|
2013-03-12 16:44:40 +08:00
|
|
|
a->AllocateBatch(&stats, &cache, 32);
|
2013-05-20 15:29:21 +08:00
|
|
|
EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
|
2012-12-12 22:32:18 +08:00
|
|
|
a->TestOnlyUnmap();
|
2013-05-20 15:29:21 +08:00
|
|
|
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
|
2012-12-12 22:32:18 +08:00
|
|
|
delete a;
|
2012-12-14 18:12:14 +08:00
|
|
|
// fprintf(stderr, "Map: %d Unmap: %d\n",
|
|
|
|
// TestMapUnmapCallback::map_count,
|
|
|
|
// TestMapUnmapCallback::unmap_count);
|
2012-12-12 22:32:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
|
|
|
|
TestMapUnmapCallback::map_count = 0;
|
|
|
|
TestMapUnmapCallback::unmap_count = 0;
|
2018-01-18 07:20:36 +08:00
|
|
|
LargeMmapAllocator<TestMapUnmapCallback> a;
|
2017-06-21 05:23:02 +08:00
|
|
|
a.Init();
|
2013-01-24 17:08:03 +08:00
|
|
|
AllocatorStats stats;
|
|
|
|
stats.Init();
|
|
|
|
void *x = a.Allocate(&stats, 1 << 20, 1);
|
2012-12-12 22:32:18 +08:00
|
|
|
EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
|
2013-01-24 17:08:03 +08:00
|
|
|
a.Deallocate(&stats, x);
|
2012-12-12 22:32:18 +08:00
|
|
|
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
|
|
|
|
}
|
|
|
|
|
2017-06-27 06:54:10 +08:00
|
|
|
// Don't test OOM conditions on Win64 because it causes other tests on the same
|
|
|
|
// machine to OOM.
|
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
|
|
|
|
Allocator64 a;
|
2016-11-29 08:22:50 +08:00
|
|
|
a.Init(kReleaseToOSIntervalNever);
|
2017-06-27 06:54:10 +08:00
|
|
|
SizeClassAllocatorLocalCache<Allocator64> cache;
|
2013-01-24 17:08:03 +08:00
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
|
|
cache.Init(0);
|
|
|
|
AllocatorStats stats;
|
|
|
|
stats.Init();
|
2017-06-27 06:54:10 +08:00
|
|
|
|
2016-08-25 05:20:10 +08:00
|
|
|
const size_t kNumChunks = 128;
|
|
|
|
uint32_t chunks[kNumChunks];
|
2017-06-27 06:54:10 +08:00
|
|
|
bool allocation_failed = false;
|
2012-12-05 18:09:15 +08:00
|
|
|
for (int i = 0; i < 1000000; i++) {
|
2017-06-27 06:54:10 +08:00
|
|
|
if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
|
|
|
|
allocation_failed = true;
|
|
|
|
break;
|
|
|
|
}
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
2017-06-27 06:54:10 +08:00
|
|
|
EXPECT_EQ(allocation_failed, true);
|
2012-12-05 18:09:15 +08:00
|
|
|
|
|
|
|
a.TestOnlyUnmap();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, LargeMmapAllocator) {
|
2018-01-18 07:20:36 +08:00
|
|
|
LargeMmapAllocator<NoOpMapUnmapCallback> a;
|
2017-06-21 05:23:02 +08:00
|
|
|
a.Init();
|
2013-01-24 17:08:03 +08:00
|
|
|
AllocatorStats stats;
|
|
|
|
stats.Init();
|
2012-12-05 18:09:15 +08:00
|
|
|
|
2012-12-24 22:35:14 +08:00
|
|
|
static const int kNumAllocs = 1000;
|
2012-12-18 22:56:38 +08:00
|
|
|
char *allocated[kNumAllocs];
|
2012-12-24 22:35:14 +08:00
|
|
|
static const uptr size = 4000;
|
2012-12-05 18:09:15 +08:00
|
|
|
// Allocate some.
|
|
|
|
for (int i = 0; i < kNumAllocs; i++) {
|
2013-01-24 17:08:03 +08:00
|
|
|
allocated[i] = (char *)a.Allocate(&stats, size, 1);
|
2012-12-24 22:35:14 +08:00
|
|
|
CHECK(a.PointerIsMine(allocated[i]));
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
// Deallocate all.
|
|
|
|
CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
|
|
|
|
for (int i = 0; i < kNumAllocs; i++) {
|
2012-12-18 22:56:38 +08:00
|
|
|
char *p = allocated[i];
|
2012-12-05 18:09:15 +08:00
|
|
|
CHECK(a.PointerIsMine(p));
|
2013-01-24 17:08:03 +08:00
|
|
|
a.Deallocate(&stats, p);
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
// Check that none are left.
|
|
|
|
CHECK_EQ(a.TotalMemoryUsed(), 0);
|
|
|
|
|
|
|
|
// Allocate some more, also add metadata.
|
|
|
|
for (int i = 0; i < kNumAllocs; i++) {
|
2013-01-24 17:08:03 +08:00
|
|
|
char *x = (char *)a.Allocate(&stats, size, 1);
|
2012-12-05 18:09:15 +08:00
|
|
|
CHECK_GE(a.GetActuallyAllocatedSize(x), size);
|
|
|
|
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
|
|
|
|
*meta = i;
|
|
|
|
allocated[i] = x;
|
|
|
|
}
|
2012-12-24 22:35:14 +08:00
|
|
|
for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
|
|
|
|
char *p = allocated[i % kNumAllocs];
|
|
|
|
CHECK(a.PointerIsMine(p));
|
|
|
|
CHECK(a.PointerIsMine(p + 2000));
|
|
|
|
}
|
2012-12-05 18:09:15 +08:00
|
|
|
CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
|
|
|
|
// Deallocate all in reverse order.
|
|
|
|
for (int i = 0; i < kNumAllocs; i++) {
|
|
|
|
int idx = kNumAllocs - i - 1;
|
2012-12-18 22:56:38 +08:00
|
|
|
char *p = allocated[idx];
|
2012-12-05 18:09:15 +08:00
|
|
|
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
|
|
|
|
CHECK_EQ(*meta, idx);
|
|
|
|
CHECK(a.PointerIsMine(p));
|
2013-01-24 17:08:03 +08:00
|
|
|
a.Deallocate(&stats, p);
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
CHECK_EQ(a.TotalMemoryUsed(), 0);
|
2012-12-24 22:35:14 +08:00
|
|
|
|
2016-07-27 01:59:09 +08:00
|
|
|
// Test alignments. Test with 256MB alignment on x64 non-Windows machines.
|
|
|
|
// Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
|
|
|
|
uptr max_alignment =
|
|
|
|
(SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
|
2012-12-05 18:09:15 +08:00
|
|
|
for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
|
2012-12-24 22:35:14 +08:00
|
|
|
const uptr kNumAlignedAllocs = 100;
|
2012-12-24 22:53:13 +08:00
|
|
|
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
|
2012-12-05 18:09:15 +08:00
|
|
|
uptr size = ((i % 10) + 1) * 4096;
|
2013-01-24 17:08:03 +08:00
|
|
|
char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
|
2012-12-18 22:56:38 +08:00
|
|
|
CHECK_EQ(p, a.GetBlockBegin(p));
|
|
|
|
CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
|
|
|
|
CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
|
2012-12-05 18:09:15 +08:00
|
|
|
CHECK_EQ(0, (uptr)allocated[i] % alignment);
|
|
|
|
p[0] = p[size - 1] = 0;
|
|
|
|
}
|
2012-12-24 22:53:13 +08:00
|
|
|
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
|
2013-01-24 17:08:03 +08:00
|
|
|
a.Deallocate(&stats, allocated[i]);
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
}
|
2013-04-08 16:43:22 +08:00
|
|
|
|
|
|
|
// Regression test for boundary condition in GetBlockBegin().
|
|
|
|
uptr page_size = GetPageSizeCached();
|
|
|
|
char *p = (char *)a.Allocate(&stats, page_size, 1);
|
|
|
|
CHECK_EQ(p, a.GetBlockBegin(p));
|
|
|
|
CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
|
|
|
|
CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
|
|
|
|
a.Deallocate(&stats, p);
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
template
|
|
|
|
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
|
|
|
|
void TestCombinedAllocator() {
|
2012-12-06 22:27:32 +08:00
|
|
|
typedef
|
|
|
|
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
|
|
|
|
Allocator;
|
|
|
|
Allocator *a = new Allocator;
|
2017-06-21 05:23:02 +08:00
|
|
|
a->Init(kReleaseToOSIntervalNever);
|
2017-02-08 07:13:10 +08:00
|
|
|
std::mt19937 r;
|
2012-12-05 18:09:15 +08:00
|
|
|
|
|
|
|
AllocatorCache cache;
|
2013-01-24 17:08:03 +08:00
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
|
|
a->InitCache(&cache);
|
2012-12-05 18:09:15 +08:00
|
|
|
|
2012-12-06 22:27:32 +08:00
|
|
|
EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
|
|
|
|
EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
|
|
|
|
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
|
|
|
|
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
|
|
|
|
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
|
2018-01-18 07:20:36 +08:00
|
|
|
EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
|
2013-09-06 17:25:11 +08:00
|
|
|
|
2012-12-05 18:09:15 +08:00
|
|
|
const uptr kNumAllocs = 100000;
|
|
|
|
const uptr kNumIter = 10;
|
|
|
|
for (uptr iter = 0; iter < kNumIter; iter++) {
|
|
|
|
std::vector<void*> allocated;
|
|
|
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
|
|
|
uptr size = (i % (1 << 14)) + 1;
|
|
|
|
if ((i % 1024) == 0)
|
|
|
|
size = 1 << (10 + (i % 14));
|
2012-12-06 22:27:32 +08:00
|
|
|
void *x = a->Allocate(&cache, size, 1);
|
|
|
|
uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
|
2012-12-05 18:09:15 +08:00
|
|
|
CHECK_EQ(*meta, 0);
|
|
|
|
*meta = size;
|
|
|
|
allocated.push_back(x);
|
|
|
|
}
|
|
|
|
|
2017-02-08 07:13:10 +08:00
|
|
|
std::shuffle(allocated.begin(), allocated.end(), r);
|
2012-12-05 18:09:15 +08:00
|
|
|
|
2018-12-04 22:03:55 +08:00
|
|
|
// Test ForEachChunk(...)
|
|
|
|
{
|
|
|
|
std::set<void *> reported_chunks;
|
|
|
|
auto cb = [](uptr chunk, void *arg) {
|
|
|
|
auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
|
|
|
|
auto pair =
|
|
|
|
reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
|
|
|
|
// Check chunk is never reported more than once.
|
|
|
|
ASSERT_TRUE(pair.second);
|
|
|
|
};
|
|
|
|
a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
|
|
|
|
for (const auto &allocated_ptr : allocated) {
|
|
|
|
ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-05 18:09:15 +08:00
|
|
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
|
|
|
void *x = allocated[i];
|
2012-12-06 22:27:32 +08:00
|
|
|
uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
|
2012-12-05 18:09:15 +08:00
|
|
|
CHECK_NE(*meta, 0);
|
2012-12-06 22:27:32 +08:00
|
|
|
CHECK(a->PointerIsMine(x));
|
2012-12-05 18:09:15 +08:00
|
|
|
*meta = 0;
|
2012-12-06 22:27:32 +08:00
|
|
|
a->Deallocate(&cache, x);
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
allocated.clear();
|
2012-12-06 22:27:32 +08:00
|
|
|
a->SwallowCache(&cache);
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
2013-01-24 17:08:03 +08:00
|
|
|
a->DestroyCache(&cache);
|
2012-12-06 22:27:32 +08:00
|
|
|
a->TestOnlyUnmap();
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
|
2014-12-12 15:08:12 +08:00
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64
|
2012-12-06 22:27:32 +08:00
|
|
|
TEST(SanitizerCommon, CombinedAllocator64) {
|
2012-12-05 18:09:15 +08:00
|
|
|
TestCombinedAllocator<Allocator64,
|
2012-12-12 22:32:18 +08:00
|
|
|
LargeMmapAllocator<>,
|
2012-12-05 18:09:15 +08:00
|
|
|
SizeClassAllocatorLocalCache<Allocator64> > ();
|
|
|
|
}
|
2012-12-06 22:27:32 +08:00
|
|
|
|
2016-08-05 02:15:38 +08:00
|
|
|
TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
|
|
|
|
TestCombinedAllocator<Allocator64Dynamic,
|
|
|
|
LargeMmapAllocator<>,
|
|
|
|
SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
|
|
|
|
}
|
|
|
|
|
2016-09-16 06:34:53 +08:00
|
|
|
#if !SANITIZER_ANDROID
|
2012-12-06 22:27:32 +08:00
|
|
|
TEST(SanitizerCommon, CombinedAllocator64Compact) {
|
|
|
|
TestCombinedAllocator<Allocator64Compact,
|
2012-12-12 22:32:18 +08:00
|
|
|
LargeMmapAllocator<>,
|
2012-12-06 22:27:32 +08:00
|
|
|
SizeClassAllocatorLocalCache<Allocator64Compact> > ();
|
|
|
|
}
|
2016-09-16 06:34:53 +08:00
|
|
|
#endif
|
2016-09-01 01:52:55 +08:00
|
|
|
|
|
|
|
TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
|
|
|
|
TestCombinedAllocator<Allocator64VeryCompact,
|
|
|
|
LargeMmapAllocator<>,
|
|
|
|
SizeClassAllocatorLocalCache<Allocator64VeryCompact> > ();
|
|
|
|
}
|
2012-12-05 18:09:15 +08:00
|
|
|
#endif
|
|
|
|
|
2012-12-06 22:27:32 +08:00
|
|
|
TEST(SanitizerCommon, CombinedAllocator32Compact) {
|
|
|
|
TestCombinedAllocator<Allocator32Compact,
|
2012-12-12 22:32:18 +08:00
|
|
|
LargeMmapAllocator<>,
|
2012-12-06 22:27:32 +08:00
|
|
|
SizeClassAllocatorLocalCache<Allocator32Compact> > ();
|
|
|
|
}
|
|
|
|
|
2012-12-05 18:09:15 +08:00
|
|
|
template <class AllocatorCache>
|
|
|
|
void TestSizeClassAllocatorLocalCache() {
|
|
|
|
AllocatorCache cache;
|
2012-12-06 22:39:41 +08:00
|
|
|
typedef typename AllocatorCache::Allocator Allocator;
|
|
|
|
Allocator *a = new Allocator();
|
2012-12-05 18:09:15 +08:00
|
|
|
|
2016-11-29 08:22:50 +08:00
|
|
|
a->Init(kReleaseToOSIntervalNever);
|
2013-01-24 17:08:03 +08:00
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
|
|
cache.Init(0);
|
2012-12-05 18:09:15 +08:00
|
|
|
|
|
|
|
const uptr kNumAllocs = 10000;
|
|
|
|
const int kNumIter = 100;
|
|
|
|
uptr saved_total = 0;
|
2012-12-24 21:41:07 +08:00
|
|
|
for (int class_id = 1; class_id <= 5; class_id++) {
|
2012-12-24 22:35:14 +08:00
|
|
|
for (int it = 0; it < kNumIter; it++) {
|
2012-12-24 21:41:07 +08:00
|
|
|
void *allocated[kNumAllocs];
|
|
|
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
|
|
|
allocated[i] = cache.Allocate(a, class_id);
|
|
|
|
}
|
|
|
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
|
|
|
cache.Deallocate(a, class_id, allocated[i]);
|
|
|
|
}
|
|
|
|
cache.Drain(a);
|
|
|
|
uptr total_allocated = a->TotalMemoryUsed();
|
2012-12-24 22:35:14 +08:00
|
|
|
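// After the first round, an allocate/deallocate/drain cycle must not grow
// the total memory mapped by the allocator.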
if (it)
|
2012-12-24 21:41:07 +08:00
|
|
|
CHECK_EQ(saved_total, total_allocated);
|
|
|
|
saved_total = total_allocated;
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-06 22:39:41 +08:00
|
|
|
a->TestOnlyUnmap();
|
|
|
|
delete a;
|
2012-12-05 18:09:15 +08:00
|
|
|
}
|
|
|
|
|
2014-12-12 15:08:12 +08:00
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64
|
2016-08-06 06:36:30 +08:00
|
|
|
// These tests can fail on Windows if memory is somewhat full and lit happens
|
|
|
|
// to run them all at the same time. FIXME: Make them not flaky and reenable.
|
|
|
|
#if !SANITIZER_WINDOWS
|
2012-12-05 18:09:15 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
|
|
|
|
TestSizeClassAllocatorLocalCache<
|
|
|
|
SizeClassAllocatorLocalCache<Allocator64> >();
|
|
|
|
}
|
2012-12-06 22:39:41 +08:00
|
|
|
|
2016-08-05 02:15:38 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
|
|
|
|
TestSizeClassAllocatorLocalCache<
|
|
|
|
SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
|
|
|
|
}
|
|
|
|
|
2016-09-16 06:34:53 +08:00
|
|
|
#if !SANITIZER_ANDROID
|
2012-12-06 22:39:41 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
|
|
|
|
TestSizeClassAllocatorLocalCache<
|
|
|
|
SizeClassAllocatorLocalCache<Allocator64Compact> >();
|
|
|
|
}
|
2016-09-16 06:34:53 +08:00
|
|
|
#endif
|
2016-09-01 01:52:55 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
|
|
|
|
TestSizeClassAllocatorLocalCache<
|
|
|
|
SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
|
|
|
|
}
|
2012-12-05 18:09:15 +08:00
|
|
|
#endif
|
2016-08-06 06:36:30 +08:00
|
|
|
#endif
|
2012-06-25 23:09:24 +08:00
|
|
|
|
2012-12-06 22:39:41 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
|
|
|
|
TestSizeClassAllocatorLocalCache<
|
|
|
|
SizeClassAllocatorLocalCache<Allocator32Compact> >();
|
|
|
|
}
|
|
|
|
|
2014-12-12 15:08:12 +08:00
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64
|
2012-12-14 22:20:29 +08:00
|
|
|
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
|
2013-01-14 22:06:58 +08:00
|
|
|
static AllocatorCache static_allocator_cache;
|
2012-12-14 22:20:29 +08:00
|
|
|
|
|
|
|
void *AllocatorLeakTestWorker(void *arg) {
|
|
|
|
typedef AllocatorCache::Allocator Allocator;
|
|
|
|
Allocator *a = (Allocator*)(arg);
|
|
|
|
static_allocator_cache.Allocate(a, 10);
|
|
|
|
static_allocator_cache.Drain(a);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, AllocatorLeakTest) {
|
2012-12-14 23:37:35 +08:00
|
|
|
typedef AllocatorCache::Allocator Allocator;
|
2012-12-14 22:20:29 +08:00
|
|
|
Allocator a;
|
2016-11-29 08:22:50 +08:00
|
|
|
a.Init(kReleaseToOSIntervalNever);
|
2012-12-14 22:20:29 +08:00
|
|
|
uptr total_used_memory = 0;
|
|
|
|
for (int i = 0; i < 100; i++) {
|
|
|
|
pthread_t t;
|
2014-05-13 20:02:53 +08:00
|
|
|
PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
|
|
|
|
PTHREAD_JOIN(t, 0);
|
2012-12-14 22:20:29 +08:00
|
|
|
if (i == 0)
|
|
|
|
total_used_memory = a.TotalMemoryUsed();
|
|
|
|
EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
|
|
|
|
}
|
|
|
|
|
|
|
|
a.TestOnlyUnmap();
|
|
|
|
}
|
2013-03-06 22:54:08 +08:00
|
|
|
|
|
|
|
// Struct which is allocated to pass info to new threads. The new thread frees
|
|
|
|
// it.
|
|
|
|
struct NewThreadParams {
|
|
|
|
AllocatorCache *thread_cache;
|
|
|
|
AllocatorCache::Allocator *allocator;
|
|
|
|
uptr class_id;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Called in a new thread. Just frees its argument.
|
|
|
|
static void *DeallocNewThreadWorker(void *arg) {
|
|
|
|
NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
|
|
|
|
params->thread_cache->Deallocate(params->allocator, params->class_id, params);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The allocator cache is supposed to be POD and zero initialized. We should be
|
|
|
|
// able to call Deallocate on a zeroed cache, and it will self-initialize.
|
|
|
|
TEST(Allocator, AllocatorCacheDeallocNewThread) {
|
|
|
|
AllocatorCache::Allocator allocator;
|
2016-11-29 08:22:50 +08:00
|
|
|
allocator.Init(kReleaseToOSIntervalNever);
|
2013-03-06 22:54:08 +08:00
|
|
|
AllocatorCache main_cache;
|
|
|
|
AllocatorCache child_cache;
|
|
|
|
memset(&main_cache, 0, sizeof(main_cache));
|
|
|
|
memset(&child_cache, 0, sizeof(child_cache));
|
|
|
|
|
|
|
|
uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
|
|
|
|
NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
|
|
|
|
main_cache.Allocate(&allocator, class_id));
|
|
|
|
params->thread_cache = &child_cache;
|
|
|
|
params->allocator = &allocator;
|
|
|
|
params->class_id = class_id;
|
|
|
|
pthread_t t;
|
2014-05-13 20:02:53 +08:00
|
|
|
PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
|
|
|
|
PTHREAD_JOIN(t, 0);
|
2016-07-07 23:52:28 +08:00
|
|
|
|
|
|
|
allocator.TestOnlyUnmap();
|
2013-03-06 22:54:08 +08:00
|
|
|
}
|
2012-12-14 22:20:29 +08:00
|
|
|
#endif
|
|
|
|
|
2012-06-25 23:09:24 +08:00
|
|
|
TEST(Allocator, Basic) {
|
|
|
|
char *p = (char*)InternalAlloc(10);
|
|
|
|
EXPECT_NE(p, (char*)0);
|
|
|
|
char *p2 = (char*)InternalAlloc(20);
|
|
|
|
EXPECT_NE(p2, (char*)0);
|
|
|
|
EXPECT_NE(p2, p);
|
|
|
|
InternalFree(p);
|
|
|
|
InternalFree(p2);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(Allocator, Stress) {
|
|
|
|
const int kCount = 1000;
|
|
|
|
char *ptrs[kCount];
|
|
|
|
unsigned rnd = 42;
|
|
|
|
for (int i = 0; i < kCount; i++) {
|
2013-01-14 23:12:26 +08:00
|
|
|
uptr sz = my_rand_r(&rnd) % 1000;
|
2012-06-25 23:09:24 +08:00
|
|
|
char *p = (char*)InternalAlloc(sz);
|
|
|
|
EXPECT_NE(p, (char*)0);
|
|
|
|
ptrs[i] = p;
|
|
|
|
}
|
|
|
|
for (int i = 0; i < kCount; i++) {
|
|
|
|
InternalFree(ptrs[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-17 08:12:50 +08:00
|
|
|
TEST(Allocator, LargeAlloc) {
|
|
|
|
void *p = InternalAlloc(10 << 20);
|
|
|
|
InternalFree(p);
|
2013-05-29 17:15:39 +08:00
|
|
|
}
|
|
|
|
|
2012-08-21 16:13:37 +08:00
|
|
|
TEST(Allocator, ScopedBuffer) {
|
|
|
|
const int kSize = 512;
|
|
|
|
{
|
2018-05-07 13:56:36 +08:00
|
|
|
InternalMmapVector<int> int_buf(kSize);
|
2018-05-07 09:08:13 +08:00
|
|
|
EXPECT_EQ((uptr)kSize, int_buf.size()); // NOLINT
|
2012-08-21 16:13:37 +08:00
|
|
|
}
|
2018-05-07 13:56:36 +08:00
|
|
|
InternalMmapVector<char> char_buf(kSize);
|
2018-05-07 09:08:13 +08:00
|
|
|
EXPECT_EQ((uptr)kSize, char_buf.size()); // NOLINT
|
2012-12-07 18:13:10 +08:00
|
|
|
internal_memset(char_buf.data(), 'c', kSize);
|
2012-08-21 16:13:37 +08:00
|
|
|
for (int i = 0; i < kSize; i++) {
|
|
|
|
EXPECT_EQ('c', char_buf[i]);
|
|
|
|
}
|
|
|
|
}
|
2012-12-14 18:17:22 +08:00
|
|
|
|
2013-06-24 16:34:50 +08:00
|
|
|
void IterationTestCallback(uptr chunk, void *arg) {
|
|
|
|
reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
|
2013-06-24 17:12:11 +08:00
|
|
|
}
|
2013-03-15 19:39:41 +08:00
|
|
|
|
|
|
|
template <class Allocator>
|
|
|
|
void TestSizeClassAllocatorIteration() {
|
|
|
|
Allocator *a = new Allocator;
|
2016-11-29 08:22:50 +08:00
|
|
|
a->Init(kReleaseToOSIntervalNever);
|
2013-03-15 19:39:41 +08:00
|
|
|
SizeClassAllocatorLocalCache<Allocator> cache;
|
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
|
|
cache.Init(0);
|
|
|
|
|
|
|
|
static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
|
|
|
|
50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
|
|
|
|
|
|
|
|
std::vector<void *> allocated;
|
|
|
|
|
|
|
|
// Allocate a bunch of chunks.
|
|
|
|
for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
|
|
|
|
uptr size = sizes[s];
|
|
|
|
if (!a->CanAllocate(size, 1)) continue;
|
|
|
|
// printf("s = %ld\n", size);
|
|
|
|
uptr n_iter = std::max((uptr)6, 80000 / size);
|
|
|
|
// fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
|
|
|
|
for (uptr j = 0; j < n_iter; j++) {
|
|
|
|
uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
|
|
|
|
void *x = cache.Allocate(a, class_id0);
|
|
|
|
allocated.push_back(x);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-24 16:34:50 +08:00
|
|
|
std::set<uptr> reported_chunks;
|
2013-03-15 19:39:41 +08:00
|
|
|
a->ForceLock();
|
2013-06-24 16:34:50 +08:00
|
|
|
a->ForEachChunk(IterationTestCallback, &reported_chunks);
|
2013-03-15 19:39:41 +08:00
|
|
|
a->ForceUnlock();
|
|
|
|
|
|
|
|
for (uptr i = 0; i < allocated.size(); i++) {
|
|
|
|
// Don't use EXPECT_NE. Reporting the first mismatch is enough.
|
2013-06-24 16:34:50 +08:00
|
|
|
ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
|
|
|
|
reported_chunks.end());
|
2013-03-15 19:39:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
a->TestOnlyUnmap();
|
|
|
|
delete a;
|
|
|
|
}
|
|
|
|
|
2014-12-12 15:08:12 +08:00
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64
|
2016-08-06 06:51:10 +08:00
|
|
|
// These tests can fail on Windows if memory is somewhat full and lit happens
|
|
|
|
// to run them all at the same time. FIXME: Make them not flaky and reenable.
|
|
|
|
#if !SANITIZER_WINDOWS
|
2013-03-15 19:39:41 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
|
|
|
|
TestSizeClassAllocatorIteration<Allocator64>();
|
|
|
|
}
|
2016-08-05 02:15:38 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
|
|
|
|
TestSizeClassAllocatorIteration<Allocator64Dynamic>();
|
|
|
|
}
|
2013-03-15 19:39:41 +08:00
|
|
|
#endif
|
2016-08-06 06:51:10 +08:00
|
|
|
#endif
|
2013-03-15 19:39:41 +08:00
|
|
|
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
|
|
|
|
TestSizeClassAllocatorIteration<Allocator32Compact>();
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
|
2018-01-18 07:20:36 +08:00
|
|
|
LargeMmapAllocator<NoOpMapUnmapCallback> a;
|
2017-06-21 05:23:02 +08:00
|
|
|
a.Init();
|
2013-03-15 19:39:41 +08:00
|
|
|
AllocatorStats stats;
|
|
|
|
stats.Init();
|
|
|
|
|
2013-03-15 20:27:52 +08:00
|
|
|
static const uptr kNumAllocs = 1000;
|
2013-03-15 19:39:41 +08:00
|
|
|
char *allocated[kNumAllocs];
|
|
|
|
static const uptr size = 40;
|
|
|
|
// Allocate some.
|
2013-05-30 16:43:30 +08:00
|
|
|
for (uptr i = 0; i < kNumAllocs; i++)
|
2013-03-15 19:39:41 +08:00
|
|
|
allocated[i] = (char *)a.Allocate(&stats, size, 1);
|
|
|
|
|
2013-06-24 16:34:50 +08:00
|
|
|
std::set<uptr> reported_chunks;
|
2013-03-15 19:39:41 +08:00
|
|
|
a.ForceLock();
|
2013-06-24 16:34:50 +08:00
|
|
|
a.ForEachChunk(IterationTestCallback, &reported_chunks);
|
2013-03-15 19:39:41 +08:00
|
|
|
a.ForceUnlock();
|
|
|
|
|
|
|
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
|
|
|
// Don't use EXPECT_NE. Reporting the first mismatch is enough.
|
2013-06-24 16:34:50 +08:00
|
|
|
ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
|
|
|
|
reported_chunks.end());
|
2013-03-15 19:39:41 +08:00
|
|
|
}
|
2013-05-30 16:43:30 +08:00
|
|
|
for (uptr i = 0; i < kNumAllocs; i++)
|
|
|
|
a.Deallocate(&stats, allocated[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
|
2018-01-18 07:20:36 +08:00
|
|
|
LargeMmapAllocator<NoOpMapUnmapCallback> a;
|
2017-06-21 05:23:02 +08:00
|
|
|
a.Init();
|
2013-05-30 16:43:30 +08:00
|
|
|
AllocatorStats stats;
|
|
|
|
stats.Init();
|
|
|
|
|
|
|
|
static const uptr kNumAllocs = 1024;
|
|
|
|
static const uptr kNumExpectedFalseLookups = 10000000;
|
|
|
|
char *allocated[kNumAllocs];
|
|
|
|
static const uptr size = 4096;
|
|
|
|
// Allocate some.
|
|
|
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
|
|
|
allocated[i] = (char *)a.Allocate(&stats, size, 1);
|
|
|
|
}
|
|
|
|
|
2013-10-17 19:18:11 +08:00
|
|
|
a.ForceLock();
|
2013-05-30 16:43:30 +08:00
|
|
|
for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
|
|
|
|
// if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
|
|
|
|
char *p1 = allocated[i % kNumAllocs];
|
2013-05-31 19:33:21 +08:00
|
|
|
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
|
|
|
|
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
|
|
|
|
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
|
|
|
|
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
|
2013-05-30 16:43:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
|
|
|
|
void *p = reinterpret_cast<void *>(i % 1024);
|
2013-05-31 19:33:21 +08:00
|
|
|
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
|
2013-05-30 16:43:30 +08:00
|
|
|
p = reinterpret_cast<void *>(~0L - (i % 1024));
|
2013-05-31 19:33:21 +08:00
|
|
|
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
|
2013-05-30 16:43:30 +08:00
|
|
|
}
|
2013-10-17 19:18:11 +08:00
|
|
|
a.ForceUnlock();
|
2013-05-30 16:43:30 +08:00
|
|
|
|
|
|
|
for (uptr i = 0; i < kNumAllocs; i++)
|
|
|
|
a.Deallocate(&stats, allocated[i]);
|
2013-03-15 19:39:41 +08:00
|
|
|
}
|
|
|
|
|
2013-05-30 16:43:30 +08:00
|
|
|
|
2016-07-23 02:41:22 +08:00
|
|
|
// Don't test OOM conditions on Win64 because it causes other tests on the same
|
|
|
|
// machine to OOM.
|
2016-09-16 06:34:53 +08:00
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
|
2019-02-16 05:48:57 +08:00
|
|
|
typedef __sanitizer::SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
|
2018-12-22 05:09:31 +08:00
|
|
|
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
|
2016-08-26 04:23:08 +08:00
|
|
|
struct AP64_SpecialSizeClassMap {
|
|
|
|
static const uptr kSpaceBeg = kAllocatorSpace;
|
|
|
|
static const uptr kSpaceSize = kAllocatorSize;
|
|
|
|
static const uptr kMetadataSize = 0;
|
|
|
|
typedef SpecialSizeClassMap SizeClassMap;
|
|
|
|
typedef NoOpMapUnmapCallback MapUnmapCallback;
|
2016-08-26 08:06:03 +08:00
|
|
|
static const uptr kFlags = 0;
|
2018-12-22 05:09:31 +08:00
|
|
|
using AddressSpaceView = AddressSpaceViewTy;
|
2016-08-26 04:23:08 +08:00
|
|
|
};
|
|
|
|
|
2013-05-16 20:58:34 +08:00
|
|
|
// Regression test for out-of-memory condition in PopulateFreeList().
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
|
|
|
|
// In a world where regions are small and chunks are huge...
|
2018-12-22 05:09:31 +08:00
|
|
|
typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
|
2013-05-16 20:58:34 +08:00
|
|
|
const uptr kRegionSize =
|
|
|
|
kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
|
|
|
|
SpecialAllocator64 *a = new SpecialAllocator64;
|
2016-11-29 08:22:50 +08:00
|
|
|
a->Init(kReleaseToOSIntervalNever);
|
2013-05-16 20:58:34 +08:00
|
|
|
SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
|
|
|
|
memset(&cache, 0, sizeof(cache));
|
|
|
|
cache.Init(0);
|
|
|
|
|
|
|
|
// ...one man is on a mission to overflow a region with a series of
|
|
|
|
// successive allocations.
|
2016-09-10 05:42:33 +08:00
|
|
|
|
2013-05-16 20:58:34 +08:00
|
|
|
const uptr kClassID = 107;
|
2016-09-10 05:42:33 +08:00
|
|
|
const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
|
2013-05-16 20:58:34 +08:00
|
|
|
ASSERT_LT(2 * kAllocationSize, kRegionSize);
|
|
|
|
ASSERT_GT(3 * kAllocationSize, kRegionSize);
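// The region fits exactly two chunks of this class, so the first two
// allocations must succeed and the third must fail.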
|
2017-06-27 06:54:10 +08:00
|
|
|
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
|
|
|
|
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
|
|
|
|
EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
|
2016-09-10 05:42:33 +08:00
|
|
|
|
|
|
|
const uptr Class2 = 100;
|
|
|
|
const uptr Size2 = SpecialSizeClassMap::Size(Class2);
|
|
|
|
ASSERT_EQ(Size2 * 8, kRegionSize);
|
|
|
|
char *p[7];
|
|
|
|
for (int i = 0; i < 7; i++) {
|
|
|
|
p[i] = (char*)cache.Allocate(a, Class2);
|
2017-06-27 06:54:10 +08:00
|
|
|
EXPECT_NE(p[i], nullptr);
|
2016-09-10 05:42:33 +08:00
|
|
|
fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
|
|
|
|
p[i][Size2 - 1] = 42;
|
|
|
|
if (i) ASSERT_LT(p[i - 1], p[i]);
|
|
|
|
}
|
2017-06-27 06:54:10 +08:00
|
|
|
EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
|
2016-09-10 05:42:33 +08:00
|
|
|
cache.Deallocate(a, Class2, p[0]);
|
|
|
|
cache.Drain(a);
|
|
|
|
ASSERT_EQ(p[6][Size2 - 1], 42);
|
2013-05-16 20:58:34 +08:00
|
|
|
a->TestOnlyUnmap();
|
|
|
|
delete a;
|
|
|
|
}
|
2016-09-10 05:42:33 +08:00
|
|
|
|
2013-05-16 20:58:34 +08:00
|
|
|
#endif
|
|
|
|
|
2017-09-27 23:38:05 +08:00
|
|
|
#if SANITIZER_CAN_USE_ALLOCATOR64
|
|
|
|
|
|
|
|
class NoMemoryMapper {
|
|
|
|
public:
|
|
|
|
uptr last_request_buffer_size;
|
|
|
|
|
|
|
|
NoMemoryMapper() : last_request_buffer_size(0) {}
|
|
|
|
|
|
|
|
uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
|
|
|
|
last_request_buffer_size = buffer_size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
class RedZoneMemoryMapper {
|
|
|
|
public:
|
|
|
|
RedZoneMemoryMapper() {
|
|
|
|
const auto page_size = GetPageSize();
|
|
|
|
buffer = MmapOrDie(3ULL * page_size, "");
|
|
|
|
MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
|
|
|
|
MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
|
|
|
|
}
|
|
|
|
~RedZoneMemoryMapper() {
|
|
|
|
UnmapOrDie(buffer, 3 * GetPageSize());
|
|
|
|
}
|
|
|
|
|
|
|
|
uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
|
|
|
|
const auto page_size = GetPageSize();
|
|
|
|
CHECK_EQ(buffer_size, page_size);
|
|
|
|
memset(reinterpret_cast<void*>(reinterpret_cast<uptr>(buffer) + page_size),
|
|
|
|
0, page_size);
|
|
|
|
return reinterpret_cast<uptr>(buffer) + page_size;
|
|
|
|
}
|
|
|
|
void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
|
|
|
|
|
|
|
|
private:
|
|
|
|
void *buffer;
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
|
|
|
|
NoMemoryMapper no_memory_mapper;
|
|
|
|
typedef Allocator64::PackedCounterArray<NoMemoryMapper>
|
|
|
|
NoMemoryPackedCounterArray;
|
|
|
|
|
|
|
|
for (int i = 0; i < 64; i++) {
|
|
|
|
// Various valid counter max values packed into one word.
|
|
|
|
NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
|
|
|
|
EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
|
|
|
|
|
|
|
|
// Check the "all bit set" values too.
|
|
|
|
NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
|
|
|
|
EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
|
|
|
|
|
|
|
|
// Verify the packing ratio, the counter is expected to be packed into the
|
|
|
|
// closest power of 2 bits.
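// For example (illustrative arithmetic): with i == 20 each counter must hold
// values up to 2^20, which needs 21 bits, rounded up to 32 bits per counter;
// 64 counters * 32 bits = 256 bytes = 8 * RoundUpToPowerOfTwo(21).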
|
|
|
|
NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
|
|
|
|
EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
|
|
|
|
no_memory_mapper.last_request_buffer_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
RedZoneMemoryMapper memory_mapper;
|
|
|
|
typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
|
|
|
|
RedZonePackedCounterArray;
|
|
|
|
// Go through 1, 2, 4, 8, .. 64 bits per counter.
|
|
|
|
for (int i = 0; i < 7; i++) {
|
|
|
|
// Make sure counters request one memory page for the buffer.
|
|
|
|
const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
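// E.g. with a 4096-byte page and i == 0: (4096 / 8) * 64 = 32768 one-bit
// counters, i.e. exactly 4096 bytes. The buffer stays one page for every i,
// because the counter width doubles as the counter count halves.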
|
|
|
|
RedZonePackedCounterArray counters(kNumCounters,
|
|
|
|
1ULL << ((1 << i) - 1),
|
|
|
|
&memory_mapper);
|
|
|
|
counters.Inc(0);
|
|
|
|
for (u64 c = 1; c < kNumCounters - 1; c++) {
|
|
|
|
ASSERT_EQ(0ULL, counters.Get(c));
|
|
|
|
counters.Inc(c);
|
|
|
|
ASSERT_EQ(1ULL, counters.Get(c - 1));
|
|
|
|
}
|
|
|
|
ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
|
|
|
|
counters.Inc(kNumCounters - 1);
|
|
|
|
|
|
|
|
if (i > 0) {
|
|
|
|
counters.IncRange(0, kNumCounters - 1);
|
|
|
|
for (u64 c = 0; c < kNumCounters; c++)
|
|
|
|
ASSERT_EQ(2ULL, counters.Get(c));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
class RangeRecorder {
|
|
|
|
public:
|
|
|
|
std::string reported_pages;
|
|
|
|
|
|
|
|
RangeRecorder()
|
|
|
|
: page_size_scaled_log(
|
|
|
|
Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
|
|
|
|
last_page_reported(0) {}
|
|
|
|
|
|
|
|
void ReleasePageRangeToOS(u32 from, u32 to) {
|
|
|
|
from >>= page_size_scaled_log;
|
|
|
|
to >>= page_size_scaled_log;
|
|
|
|
ASSERT_LT(from, to);
|
|
|
|
if (!reported_pages.empty())
|
|
|
|
ASSERT_LT(last_page_reported, from);
|
|
|
|
reported_pages.append(from - last_page_reported, '.');
|
|
|
|
reported_pages.append(to - from, 'x');
|
|
|
|
last_page_reported = to;
|
|
|
|
}
|
|
|
|
private:
|
|
|
|
const uptr page_size_scaled_log;
|
|
|
|
u32 last_page_reported;
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
|
|
|
|
typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
|
|
|
|
|
|
|
|
// 'x' denotes a page to be released, '.' denotes a page to be kept around.
|
|
|
|
const char* test_cases[] = {
|
|
|
|
"",
|
|
|
|
".",
|
|
|
|
"x",
|
|
|
|
"........",
|
|
|
|
"xxxxxxxxxxx",
|
|
|
|
"..............xxxxx",
|
|
|
|
"xxxxxxxxxxxxxxxxxx.....",
|
|
|
|
"......xxxxxxxx........",
|
|
|
|
"xxx..........xxxxxxxxxxxxxxx",
|
|
|
|
"......xxxx....xxxx........",
|
|
|
|
"xxx..........xxxxxxxx....xxxxxxx",
|
|
|
|
"x.x.x.x.x.x.x.x.x.x.x.x.",
|
|
|
|
".x.x.x.x.x.x.x.x.x.x.x.x",
|
|
|
|
".x.x.x.x.x.x.x.x.x.x.x.x.",
|
|
|
|
"x.x.x.x.x.x.x.x.x.x.x.x.x",
|
|
|
|
};
|
|
|
|
|
|
|
|
for (auto test_case : test_cases) {
|
|
|
|
RangeRecorder range_recorder;
|
|
|
|
RangeTracker tracker(&range_recorder);
|
|
|
|
for (int i = 0; test_case[i] != 0; i++)
|
|
|
|
tracker.NextPage(test_case[i] == 'x');
|
|
|
|
tracker.Done();
|
|
|
|
// Strip trailing '.'-pages before comparing the results as they are not
|
|
|
|
// going to be reported to range_recorder anyway.
|
|
|
|
const char* last_x = strrchr(test_case, 'x');
|
|
|
|
std::string expected(
|
|
|
|
test_case,
|
|
|
|
last_x == nullptr ? 0 : (last_x - test_case + 1));
|
|
|
|
EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
class ReleasedPagesTrackingMemoryMapper {
|
|
|
|
public:
|
|
|
|
std::set<u32> reported_pages;
|
|
|
|
|
|
|
|
uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
|
|
|
|
reported_pages.clear();
|
|
|
|
return reinterpret_cast<uptr>(calloc(1, buffer_size));
|
|
|
|
}
|
|
|
|
void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
|
|
|
|
free(reinterpret_cast<void*>(buffer));
|
|
|
|
}
|
|
|
|
|
|
|
|
void ReleasePageRangeToOS(u32 from, u32 to) {
|
|
|
|
uptr page_size_scaled =
|
|
|
|
GetPageSizeCached() >> Allocator64::kCompactPtrScale;
|
|
|
|
for (u32 i = from; i < to; i += page_size_scaled)
|
|
|
|
reported_pages.insert(i);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
template <class Allocator>
|
|
|
|
void TestReleaseFreeMemoryToOS() {
|
|
|
|
ReleasedPagesTrackingMemoryMapper memory_mapper;
|
|
|
|
const uptr kAllocatedPagesCount = 1024;
|
|
|
|
const uptr page_size = GetPageSizeCached();
|
|
|
|
const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
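// The free list handed to ReleaseFreeMemoryToOS holds compact (scaled) chunk
// offsets, so page and chunk sizes are scaled by kCompactPtrScale here too.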
|
|
|
|
std::mt19937 r;
|
|
|
|
uint32_t rnd_state = 42;
|
|
|
|
|
|
|
|
for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
|
|
|
|
class_id++) {
|
|
|
|
const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
|
|
|
|
const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
|
|
|
|
const uptr max_chunks =
|
|
|
|
kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
|
|
|
|
|
|
|
|
// Generate the random free list.
|
|
|
|
std::vector<u32> free_array;
|
|
|
|
bool in_free_range = false;
|
|
|
|
uptr current_range_end = 0;
|
|
|
|
for (uptr i = 0; i < max_chunks; i++) {
|
|
|
|
if (i == current_range_end) {
|
|
|
|
in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
|
|
|
|
current_range_end += my_rand_r(&rnd_state) % 100 + 1;
|
|
|
|
}
|
|
|
|
if (in_free_range)
|
|
|
|
free_array.push_back(i * chunk_size_scaled);
|
|
|
|
}
|
|
|
|
if (free_array.empty())
|
|
|
|
continue;
|
|
|
|
// Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on
|
|
|
|
// the list ordering.
|
|
|
|
std::shuffle(free_array.begin(), free_array.end(), r);
|
|
|
|
|
|
|
|
Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
|
|
|
|
chunk_size, kAllocatedPagesCount,
|
|
|
|
&memory_mapper);
|
|
|
|
|
|
|
|
// Verify that no released page is touched by a used chunk, and that all
|
|
|
|
// ranges of free chunks big enough to cover entire memory pages had those
|
|
|
|
// pages released.
|
|
|
|
uptr verified_released_pages = 0;
|
|
|
|
std::set<u32> free_chunks(free_array.begin(), free_array.end());
|
|
|
|
|
|
|
|
u32 current_chunk = 0;
|
|
|
|
in_free_range = false;
|
|
|
|
u32 current_free_range_start = 0;
|
|
|
|
for (uptr i = 0; i <= max_chunks; i++) {
|
|
|
|
bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
|
|
|
|
|
|
|
|
if (is_free_chunk) {
|
|
|
|
if (!in_free_range) {
|
|
|
|
in_free_range = true;
|
|
|
|
current_free_range_start = current_chunk;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Verify that this used chunk does not touch any released page.
|
|
|
|
for (uptr i_page = current_chunk / page_size_scaled;
|
|
|
|
i_page <= (current_chunk + chunk_size_scaled - 1) /
|
|
|
|
page_size_scaled;
|
|
|
|
i_page++) {
|
|
|
|
bool page_released =
|
|
|
|
memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
|
|
|
|
memory_mapper.reported_pages.end();
|
|
|
|
ASSERT_EQ(false, page_released);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (in_free_range) {
|
|
|
|
in_free_range = false;
|
|
|
|
// Verify that all entire memory pages covered by this range of free
|
|
|
|
// chunks were released.
|
|
|
|
u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
|
|
|
|
while (page + page_size_scaled <= current_chunk) {
|
|
|
|
bool page_released =
|
|
|
|
memory_mapper.reported_pages.find(page) !=
|
|
|
|
memory_mapper.reported_pages.end();
|
|
|
|
ASSERT_EQ(true, page_released);
|
|
|
|
verified_released_pages++;
|
|
|
|
page += page_size_scaled;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
current_chunk += chunk_size_scaled;
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
|
|
|
|
TestReleaseFreeMemoryToOS<Allocator64>();
|
|
|
|
}
|
|
|
|
|
2017-09-28 01:10:49 +08:00
|
|
|
#if !SANITIZER_ANDROID
|
2017-09-27 23:38:05 +08:00
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
|
|
|
|
TestReleaseFreeMemoryToOS<Allocator64Compact>();
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
|
|
|
|
TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
|
|
|
|
}
|
2017-09-28 01:10:49 +08:00
|
|
|
#endif // !SANITIZER_ANDROID
|
2017-09-27 23:38:05 +08:00
|
|
|
|
|
|
|
#endif // SANITIZER_CAN_USE_ALLOCATOR64
|
|
|
|
|
2013-11-25 19:33:41 +08:00
|
|
|
TEST(SanitizerCommon, TwoLevelByteMap) {
|
|
|
|
const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
|
|
|
|
const u64 n = kSize1 * kSize2;
|
|
|
|
TwoLevelByteMap<kSize1, kSize2> m;
|
2018-05-08 03:02:19 +08:00
|
|
|
m.Init();
|
2013-11-25 19:33:41 +08:00
|
|
|
for (u64 i = 0; i < n; i += 7) {
|
|
|
|
m.set(i, (i % 100) + 1);
|
|
|
|
}
|
|
|
|
for (u64 j = 0; j < n; j++) {
|
|
|
|
if (j % 7)
|
|
|
|
EXPECT_EQ(m[j], 0);
|
|
|
|
else
|
|
|
|
EXPECT_EQ(m[j], (j % 100) + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
m.TestOnlyUnmap();
|
|
|
|
}
|
|
|
|
|
2018-12-14 17:03:18 +08:00
|
|
|
template <typename AddressSpaceView>
|
|
|
|
using TestByteMapASVT =
|
|
|
|
TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
|
|
|
|
using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;
|
2013-11-25 19:33:41 +08:00
|
|
|
|
|
|
|
struct TestByteMapParam {
|
|
|
|
TestByteMap *m;
|
|
|
|
size_t shard;
|
|
|
|
size_t num_shards;
|
|
|
|
};
|
|
|
|
|
|
|
|
void *TwoLevelByteMapUserThread(void *param) {
|
|
|
|
TestByteMapParam *p = (TestByteMapParam*)param;
|
|
|
|
for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
|
|
|
|
size_t val = (i % 100) + 1;
|
|
|
|
p->m->set(i, val);
|
|
|
|
EXPECT_EQ((*p->m)[i], val);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
|
|
|
|
TestByteMap m;
|
2018-05-08 03:02:19 +08:00
|
|
|
m.Init();
|
2013-11-25 19:33:41 +08:00
|
|
|
TestMapUnmapCallback::map_count = 0;
|
|
|
|
TestMapUnmapCallback::unmap_count = 0;
|
|
|
|
static const int kNumThreads = 4;
|
|
|
|
pthread_t t[kNumThreads];
|
|
|
|
TestByteMapParam p[kNumThreads];
|
|
|
|
for (int i = 0; i < kNumThreads; i++) {
|
|
|
|
p[i].m = &m;
|
|
|
|
p[i].shard = i;
|
|
|
|
p[i].num_shards = kNumThreads;
|
2014-05-13 20:02:53 +08:00
|
|
|
PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
|
2013-11-25 19:33:41 +08:00
|
|
|
}
|
|
|
|
for (int i = 0; i < kNumThreads; i++) {
|
2014-05-13 20:02:53 +08:00
|
|
|
PTHREAD_JOIN(t[i], 0);
|
2013-11-25 19:33:41 +08:00
|
|
|
}
|
|
|
|
EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
|
|
|
|
EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
|
|
|
|
m.TestOnlyUnmap();
|
|
|
|
EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
|
|
|
|
EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
|
|
|
|
}
|
|
|
|
|
2015-01-03 12:29:12 +08:00
|
|
|
#endif // #if !SANITIZER_DEBUG
|