[sanitizer] Do not use the alignment-rounded-up size when using the secondary

Summary:
The combined allocator rounds up the requested size with regard to the
alignment, which makes sense when being serviced by the primary as it comes
with alignment guarantees, but not with the secondary. For the rare case of
large alignments, it wastes memory, and entices unnecessarily large fields for
the Scudo header. With this patch, we pass the non-alignment-rounded-up size
to the secondary, and adapt the Scudo code for this change.

Reviewers: alekseyshl, kcc

Subscribers: llvm-commits, kubabrecka

Differential Revision: https://reviews.llvm.org/D27428

llvm-svn: 289088
This commit is contained in:
Kostya Kortchinsky 2016-12-08 19:05:46 +00:00
parent 235c275b20
commit 2defe4d9a1
3 changed files with 22 additions and 6 deletions

View File

@@ -49,16 +49,29 @@ class CombinedAllocator {
size = 1; size = 1;
if (size + alignment < size) return ReturnNullOrDieOnBadRequest(); if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM(); if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
// or equal to 8 will prevent any further rounding up, as well as the later
// alignment check.
if (alignment > 8) if (alignment > 8)
size = RoundUpTo(size, alignment); size = RoundUpTo(size, alignment);
void *res; void *res;
bool from_primary = primary_.CanAllocate(size, alignment); bool from_primary = primary_.CanAllocate(size, alignment);
// The primary allocator should return a 2^x aligned allocation when
// requested 2^x bytes, hence using the rounded up 'size' when being
// serviced by the primary. The secondary takes care of the alignment
// without such requirement, and allocating 'size' would use extraneous
// memory, so we employ 'original_size'.
if (from_primary) if (from_primary)
res = cache->Allocate(&primary_, primary_.ClassID(size)); res = cache->Allocate(&primary_, primary_.ClassID(size));
else else
res = secondary_.Allocate(&stats_, size, alignment); res = secondary_.Allocate(&stats_, original_size, alignment);
if (alignment > 8) if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
// When serviced by the secondary, the chunk comes from a mmap allocation
// and will be zero'd out anyway. We only need to clear out the chunk if
// it was serviced by the primary, hence using the rounded up 'size'.
if (cleared && res && from_primary) if (cleared && res && from_primary)
internal_bzero_aligned16(res, RoundUpTo(size, 16)); internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res; return res;

View File

@@ -46,7 +46,7 @@ class ScudoLargeMmapAllocator {
uptr UserBeg = MapBeg + PageSize + HeadersSize; uptr UserBeg = MapBeg + PageSize + HeadersSize;
// In the event of larger alignments, we will attempt to fit the mmap area // In the event of larger alignments, we will attempt to fit the mmap area
// better and unmap extraneous memory. This will also ensure that the // better and unmap extraneous memory. This will also ensure that the
// offset field of the header stays small (it will always be 0). // offset and unused bytes field of the header stay small.
if (Alignment > MinAlignment) { if (Alignment > MinAlignment) {
if (UserBeg & (Alignment - 1)) if (UserBeg & (Alignment - 1))
UserBeg += Alignment - (UserBeg & (Alignment - 1)); UserBeg += Alignment - (UserBeg & (Alignment - 1));
@@ -54,8 +54,9 @@ class ScudoLargeMmapAllocator {
uptr NewMapBeg = UserBeg - HeadersSize; uptr NewMapBeg = UserBeg - HeadersSize;
NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize; NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize;
CHECK_GE(NewMapBeg, MapBeg); CHECK_GE(NewMapBeg, MapBeg);
uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize); uptr NewMapEnd =
uptr NewMapEnd = NewMapBeg + NewMapSize; RoundUpTo(UserBeg + Size - Alignment - AlignedChunkHeaderSize,
PageSize) + PageSize;
CHECK_LE(NewMapEnd, MapEnd); CHECK_LE(NewMapEnd, MapEnd);
// Unmap the extra memory if it's large enough. // Unmap the extra memory if it's large enough.
uptr Diff = NewMapBeg - MapBeg; uptr Diff = NewMapBeg - MapBeg;
@@ -65,8 +66,8 @@ class ScudoLargeMmapAllocator {
if (Diff > PageSize) if (Diff > PageSize)
UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff); UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
MapBeg = NewMapBeg; MapBeg = NewMapBeg;
MapSize = NewMapSize;
MapEnd = NewMapEnd; MapEnd = NewMapEnd;
MapSize = NewMapEnd - NewMapBeg;
} }
uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size; uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size;
// For larger alignments, Alignment was added by the frontend to Size. // For larger alignments, Alignment was added by the frontend to Size.

View File

@@ -17,7 +17,9 @@
#include <fcntl.h> #include <fcntl.h>
#include <stdarg.h> #include <stdarg.h>
#include <unistd.h> #include <unistd.h>
#include <cpuid.h> #if defined(__x86_64__) || defined(__i386__)
# include <cpuid.h>
#endif
#include <cstring> #include <cstring>