[sanitizer] Align & pad the allocator structures to the cacheline size

Summary:
Both the `SizeClassInfo` structures for the 32-bit primary and the `RegionInfo`
structures for the 64-bit primary can be accessed by different threads, and as
such they should be aligned and padded to the cacheline size to avoid false
sharing. The former was padded but the array was not aligned; the latter was
not padded, but we lucked out, as the structure was 192 bytes and ended up
aligned thanks to the properties of `mmap`.
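
For background, false sharing happens when two threads write to distinct
variables that merely share a cacheline: every write forces the line to bounce
between cores even though no data is logically shared. A minimal sketch of the
pattern, using hypothetical structures that are not part of this patch:

  // Hypothetical illustration, not code from this patch. Two counters
  // owned by different threads: in Unpadded they can land on the same
  // cacheline, so concurrent writes make the line ping-pong between
  // cores; in Padded each counter gets its own line.
  struct Unpadded {
    unsigned long counter_a;  // written by thread 1
    unsigned long counter_b;  // written by thread 2, same cacheline
  };
  struct Padded {
    alignas(64) unsigned long counter_a;  // own cacheline
    alignas(64) unsigned long counter_b;  // own cacheline
  };
  static_assert(sizeof(Padded) >= 128, "each counter on its own line");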

I plan on adding a couple of fields to `RegionInfo`, and some heavily threaded
tests showed that without proper padding and alignment, performance was taking
a hit; the hit goes away with proper padding.

This patch makes sure that both structures are properly padded and aligned. I
used a template to avoid padding when the size is already a multiple of the
cacheline size. There might be a better way to do this; I am open to
suggestions.
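
One shape such a conditional-padding template could take (a sketch with
hypothetical names, not necessarily the code that landed; the diff below
instead relies on the ALIGNED attribute, which makes the compiler add the tail
padding itself):

  #include <cstddef>

  // Hypothetical helper: append tail padding only when sizeof(T) is not
  // already a multiple of the cacheline size.
  template <typename T, std::size_t kLine,
            bool kPad = (sizeof(T) % kLine != 0)>
  struct PadToCacheLine : T {
    char padding[kLine - sizeof(T) % kLine];
  };

  // When sizeof(T) is already a multiple of kLine, add no padding member.
  template <typename T, std::size_t kLine>
  struct PadToCacheLine<T, kLine, false> : T {};

  struct Example { long a, b, c; };  // 24 bytes on LP64
  static_assert(sizeof(PadToCacheLine<Example, 64>) % 64 == 0,
                "padded size must be a multiple of the cacheline size");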

Reviewers: alekseyshl, dvyukov

Reviewed By: alekseyshl

Subscribers: kubamracek, delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D44261

llvm-svn: 327145
Kostya Kortchinsky 2018-03-09 16:18:38 +00:00
parent 3675b8cece
commit 69df838b52
3 changed files with 14 additions and 13 deletions

@@ -266,14 +266,12 @@ class SizeClassAllocator32 {
   static const uptr kRegionSize = 1 << kRegionSizeLog;
   static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
-  struct SizeClassInfo {
+  struct ALIGNED(kCacheLineSize) SizeClassInfo {
     SpinMutex mutex;
     IntrusiveList<TransferBatch> free_list;
     u32 rand_state;
-    char padding[kCacheLineSize - 2 * sizeof(uptr) -
-                 sizeof(IntrusiveList<TransferBatch>)];
   };
-  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

   uptr ComputeRegionId(uptr mem) {
     const uptr res = mem >> kRegionSizeLog;
@@ -299,7 +297,7 @@ class SizeClassAllocator32 {
   }

   SizeClassInfo *GetSizeClassInfo(uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
+    DCHECK_LT(class_id, kNumClasses);
     return &size_class_info_array[class_id];
   }

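For reference, the two macros doing the heavy lifting above come from
sanitizer_internal_defs.h. Approximately (a simplified sketch; the real
definitions carry per-compiler details):

  // Simplified sketch of the sanitizer_common macros used above.
  #if defined(_MSC_VER)
  # define ALIGNED(x) __declspec(align(x))
  #else
  # define ALIGNED(x) __attribute__((aligned(x)))
  #endif
  // COMPILER_CHECK is a compile-time assertion, morally a static_assert.
  #define COMPILER_CHECK(pred) static_assert(pred, "")

Because an alignment attribute on a struct also rounds sizeof up to a multiple
of that alignment, the explicit char padding[] member and the exact
== kCacheLineSize check can be replaced by the weaker % kCacheLineSize == 0
check.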
@@ -80,6 +80,8 @@ class SizeClassAllocator64 {
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
     MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
+    // Check that the RegionInfo array is aligned on the CacheLine size.
+    DCHECK_EQ(SpaceEnd() & (kCacheLineSize - 1), 0);
   }

   s32 ReleaseToOSIntervalMs() const {
@@ -302,7 +304,7 @@ class SizeClassAllocator64 {
   static uptr AdditionalSize() {
     return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
-        GetPageSizeCached());
+                     GetPageSizeCached());
   }

   typedef SizeClassMap SizeClassMapT;
@@ -584,7 +586,7 @@ class SizeClassAllocator64 {
     u64 last_released_bytes;
   };

-  struct RegionInfo {
+  struct ALIGNED(kCacheLineSize) RegionInfo {
     BlockingMutex mutex;
     uptr num_freed_chunks;  // Number of elements in the freearray.
     uptr mapped_free_array;  // Bytes mapped for freearray.
@@ -597,12 +599,11 @@ class SizeClassAllocator64 {
     Stats stats;
     ReleaseToOsInfo rtoi;
   };
-  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);

   RegionInfo *GetRegionInfo(uptr class_id) const {
-    CHECK_LT(class_id, kNumClasses);
-    RegionInfo *regions =
-        reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
+    DCHECK_LT(class_id, kNumClasses);
+    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
     return &regions[class_id];
   }

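The DCHECK_EQ added above encodes a simple invariant: the RegionInfo array
lives at SpaceEnd(), which is mapped by mmap and therefore page-aligned, and a
page-aligned address is cacheline-aligned because the page size is a
power-of-two multiple of the cacheline size. A sketch of the mask test, with
hypothetical constants:

  // Hypothetical values for illustration; the real sizes are queried at
  // runtime (GetPageSizeCached) and set per-arch (kCacheLineSize).
  constexpr unsigned long kPageSize = 4096;
  constexpr unsigned long kLine = 64;
  static_assert(kPageSize % kLine == 0, "pages are multiples of cachelines");

  inline bool IsCacheLineAligned(unsigned long addr) {
    // The low bits of an aligned address are all zero, so masking with
    // (kLine - 1) must yield zero -- the same test the DCHECK_EQ uses.
    return (addr & (kLine - 1)) == 0;
  }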
@@ -40,10 +40,12 @@
 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
 const uptr kWordSizeInBits = 8 * kWordSize;

 #if defined(__powerpc__) || defined(__powerpc64__)
-const uptr kCacheLineSize = 128;
+constexpr uptr kCacheLineSize = 128;
 #else
-const uptr kCacheLineSize = 64;
+constexpr uptr kCacheLineSize = 64;
 #endif
+// Check that the CacheLine size is a power-of-two.
+COMPILER_CHECK((kCacheLineSize & (kCacheLineSize - 1)) == 0);

 const uptr kMaxPathLength = 4096;
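
The new COMPILER_CHECK relies on the classic bit trick: for x > 0, x & (x - 1)
clears the lowest set bit, so the result is zero exactly when x has a single
bit set, i.e. is a power of two. Worked out on concrete values:

  // 64 is 0b1000000 and 63 is 0b0111111, so 64 & 63 == 0.
  static_assert((64 & 63) == 0, "64 is a power of two");
  // 96 is 0b1100000 and 95 is 0b1011111; the AND keeps 0b1000000 != 0.
  static_assert((96 & 95) != 0, "96 is not a power of two");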