Corrected D27428: Do not use the alignment-rounded-up size with secondary
Summary: I actually had an integer overflow on 32-bit with D27428 that didn't reproduce locally, as the test servers would allocate addresses in the 0xffffxxxx range, which led to some issues when rounding addresses. At this point, I feel that Scudo could benefit from having its own combined allocator, as we don't get any benefit from the current one, but have to work around some hurdles (alignment checks, rounding up that is no longer needed, extraneous code).

Reviewers: kcc, alekseyshl

Subscribers: llvm-commits, kubabrecka

Differential Revision: https://reviews.llvm.org/D27681

llvm-svn: 289572
This commit is contained in:
parent 215f6e788e
commit c74da7ce58
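The overflow described in the summary is easy to reproduce in isolation: rounding an address in the 0xffffxxxx range up to a power-of-two boundary wraps around in 32-bit arithmetic. A minimal standalone sketch, not part of the commit, using the same round-up formula as sanitizer_common's RoundUpTo on an explicitly 32-bit type:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Same formula as sanitizer_common's RoundUpTo, on an explicit
    // 32-bit type to mimic a 32-bit uptr.
    static uint32_t RoundUpTo32(uint32_t Size, uint32_t Boundary) {
      return (Size + Boundary - 1) & ~(Boundary - 1);
    }

    int main() {
      // An address near the top of a 32-bit address space, as handed
      // out by the test servers mentioned in the summary (hypothetical
      // value).
      uint32_t Addr = 0xffffb000u;
      uint32_t Rounded = RoundUpTo32(Addr, 0x10000u);  // align to 64K
      // Size + Boundary - 1 wraps past 2^32, so the result is *lower*
      // than the input instead of the next 64K boundary.
      printf("0x%08x -> 0x%08x\n", Addr, Rounded);  // -> 0x00000000
      assert(Rounded < Addr);  // the overflow this commit works around
    }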
@@ -49,16 +49,30 @@ class CombinedAllocator {
       size = 1;
     if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
     if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+    uptr original_size = size;
+    // If alignment requirements are to be fulfilled by the frontend allocator
+    // rather than by the primary or secondary, passing an alignment lower than
+    // or equal to 8 will prevent any further rounding up, as well as the later
+    // alignment check.
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
     void *res;
     bool from_primary = primary_.CanAllocate(size, alignment);
+    // The primary allocator should return a 2^x aligned allocation when
+    // requested 2^x bytes, hence using the rounded up 'size' when being
+    // serviced by the primary (this is no longer true when the primary is
+    // using a non-fixed base address). The secondary takes care of the
+    // alignment without such requirement, and allocating 'size' would use
+    // extraneous memory, so we employ 'original_size'.
     if (from_primary)
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
-      res = secondary_.Allocate(&stats_, size, alignment);
+      res = secondary_.Allocate(&stats_, original_size, alignment);
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+    // When serviced by the secondary, the chunk comes from a mmap allocation
+    // and will be zero'd out anyway. We only need to clear out the chunk if
+    // it was serviced by the primary, hence using the rounded up 'size'.
     if (cleared && res && from_primary)
       internal_bzero_aligned16(res, RoundUpTo(size, 16));
     return res;
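To make the 'original_size' reasoning above concrete, a standalone sketch with hypothetical numbers (a 4100-byte request with 4096-byte alignment; RoundUpTo re-declared locally so the snippet compiles on its own):

    #include <cstdio>

    typedef unsigned long uptr;  // stand-in for sanitizer_common's uptr

    static uptr RoundUpTo(uptr Size, uptr Boundary) {
      return (Size + Boundary - 1) & ~(Boundary - 1);
    }

    int main() {
      // Hypothetical request: 4100 bytes with 4096-byte alignment.
      uptr original_size = 4100, alignment = 4096;
      uptr size = RoundUpTo(original_size, alignment);  // 8192
      // Primary path: uses the rounded-up size so that the power-of-two
      // size class picked is at least 'alignment'-aligned.
      printf("primary services %lu bytes\n", size);
      // Secondary path: mmap-based and aligns the chunk itself, so the
      // rounded-up size would only map memory that is never used.
      printf("secondary services %lu bytes (avoids wasting %lu)\n",
             original_size, size - original_size);
    }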
@@ -402,12 +402,18 @@ struct Allocator {
       Size = 1;
     if (Size >= MaxAllowedMallocSize)
       return BackendAllocator.ReturnNullOrDieOnBadRequest();
-    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
-    uptr NeededSize = RoundedSize + AlignedChunkHeaderSize;
+    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
     if (Alignment > MinAlignment)
       NeededSize += Alignment;
     if (NeededSize >= MaxAllowedMallocSize)
       return BackendAllocator.ReturnNullOrDieOnBadRequest();

+    // Primary backed and Secondary backed allocations have a different
+    // treatment. We deal with alignment requirements of Primary serviced
+    // allocations here, but the Secondary will take care of its own alignment
+    // needs, which means we also have to work around some limitations of the
+    // combined allocator to accommodate the situation.
     bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

     void *Ptr;
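A worked example of the sizing above, with assumed values for MinAlignment and AlignedChunkHeaderSize (the real ones depend on Scudo's configuration):

    #include <cstdio>

    typedef unsigned long uptr;

    static uptr RoundUpTo(uptr Size, uptr Boundary) {
      return (Size + Boundary - 1) & ~(Boundary - 1);
    }

    int main() {
      // Assumed constants for illustration only.
      const uptr MinAlignment = 16, AlignedChunkHeaderSize = 16;
      uptr Size = 100, Alignment = 64;
      // Same sizing as the hunk above: user size rounded to MinAlignment,
      // plus the chunk header, plus one extra Alignment so the chunk can
      // be shifted to an aligned boundary later.
      uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize += Alignment;
      printf("NeededSize = %lu\n", NeededSize);  // 112 + 16 + 64 = 192
    }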
@@ -426,8 +432,11 @@ struct Allocator {
     // If the allocation was serviced by the secondary, the returned pointer
     // accounts for ChunkHeaderSize to pass the alignment check of the combined
     // allocator. Adjust it here.
-    if (!FromPrimary)
+    if (!FromPrimary) {
       AllocBeg -= AlignedChunkHeaderSize;
+      if (Alignment > MinAlignment)
+        NeededSize -= Alignment;
+    }

     uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
         reinterpret_cast<void *>(AllocBeg));
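A small sketch of the adjustment above, with made-up addresses and constants, showing how the frontend recovers the chunk beginning and the real backend size after the secondary hands back an aligned user pointer:

    #include <cassert>

    typedef unsigned long uptr;

    int main() {
      // Assumed values for illustration only.
      const uptr AlignedChunkHeaderSize = 16, MinAlignment = 16;
      uptr Alignment = 64, NeededSize = 256;
      // The secondary returns UserBeg itself, which is Alignment-aligned,
      // so the combined allocator's alignment CHECK on the result passes.
      uptr UserBeg = 0x7000;  // hypothetical, 64-byte aligned
      uptr AllocBeg = UserBeg;
      // Frontend adjustment from the hunk above: step back over the chunk
      // header, and drop the extra Alignment added when sizing the request.
      AllocBeg -= AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
      assert(AllocBeg + AlignedChunkHeaderSize == UserBeg);
      assert(NeededSize == 192);
    }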
@@ -32,32 +32,39 @@ class ScudoLargeMmapAllocator {
   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
-    uptr HeadersSize = sizeof(SecondaryHeader) + AlignedChunkHeaderSize;
-    uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
+    uptr MapSize = Size + SecondaryHeaderSize;
+    MapSize = RoundUpTo(MapSize, PageSize);
     // Account for 2 guard pages, one before and one after the chunk.
     MapSize += 2 * PageSize;
-    // Adding an extra Alignment is not required, it was done by the frontend.
+    // The size passed to the Secondary comprises the alignment, if large
+    // enough. Subtract it here to get the requested size, including header.
+    if (Alignment > MinAlignment)
+      Size -= Alignment;
+
     uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
     if (MapBeg == ~static_cast<uptr>(0))
       return ReturnNullOrDieOnOOM();
     // A page-aligned pointer is assumed after that, so check it now.
     CHECK(IsAligned(MapBeg, PageSize));
     uptr MapEnd = MapBeg + MapSize;
+    // The beginning of the user area for that allocation comes after the
+    // initial guard page, and both headers. This is the pointer that has to
+    // abide by alignment requirements.
     uptr UserBeg = MapBeg + PageSize + HeadersSize;
-    // In the event of larger alignments, we will attempt to fit the mmap area
-    // better and unmap extraneous memory. This will also ensure that the
+    // In the rare event of larger alignments, we will attempt to fit the mmap
+    // area better and unmap extraneous memory. This will also ensure that the
     // offset and unused bytes field of the header stay small.
     if (Alignment > MinAlignment) {
       if (UserBeg & (Alignment - 1))
         UserBeg += Alignment - (UserBeg & (Alignment - 1));
       CHECK_GE(UserBeg, MapBeg);
-      uptr NewMapBeg = UserBeg - HeadersSize;
-      NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize;
+      uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
       CHECK_GE(NewMapBeg, MapBeg);
-      uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize);
-      uptr NewMapEnd = NewMapBeg + NewMapSize;
+      uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
+          PageSize) + PageSize;
       CHECK_LE(NewMapEnd, MapEnd);
-      // Unmap the extra memory if it's large enough.
+      // Unmap the extra memory if it's large enough, on both sides.
       uptr Diff = NewMapBeg - MapBeg;
       if (Diff > PageSize)
         UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
@@ -65,14 +72,13 @@ class ScudoLargeMmapAllocator {
       if (Diff > PageSize)
         UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
       MapBeg = NewMapBeg;
-      MapSize = NewMapSize;
       MapEnd = NewMapEnd;
+      MapSize = NewMapEnd - NewMapBeg;
     }
-    uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size;
-    // For larger alignments, Alignment was added by the frontend to Size.
-    if (Alignment > MinAlignment)
-      UserEnd -= Alignment;
+    uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
     CHECK_LE(UserEnd, MapEnd - PageSize);
+    // Actually mmap the memory, preserving the guard pages on either side.
     CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
         MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
     uptr Ptr = UserBeg - AlignedChunkHeaderSize;
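Putting the new secondary math from the two hunks above together, a worked example with assumed constants (PageSize = 4096, 16-byte headers, MinAlignment = 16) and a made-up mmap base:

    #include <cassert>
    #include <cstdio>

    typedef unsigned long uptr;

    static uptr RoundUpTo(uptr S, uptr B) { return (S + B - 1) & ~(B - 1); }
    static uptr RoundDownTo(uptr S, uptr B) { return S & ~(B - 1); }

    int main() {
      // Assumed constants and a made-up mmap base, for illustration only.
      const uptr PageSize = 4096, MinAlignment = 16;
      const uptr SecondaryHeaderSize = 16, AlignedChunkHeaderSize = 16;
      const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
      // 8K of user data, 16K alignment; the frontend already added the
      // chunk header and one extra Alignment before calling the secondary.
      uptr Size = 16 + 8192 + 16384, Alignment = 16384;

      uptr MapSize = RoundUpTo(Size + SecondaryHeaderSize, PageSize)
          + 2 * PageSize;
      if (Alignment > MinAlignment)
        Size -= Alignment;  // back to header + user size, as in the hunk

      uptr MapBeg = 0x20000000;  // pretend result of MmapNoAccess(MapSize)
      uptr MapEnd = MapBeg + MapSize;
      uptr UserBeg = MapBeg + PageSize + HeadersSize;
      if (UserBeg & (Alignment - 1))
        UserBeg += Alignment - (UserBeg & (Alignment - 1));
      // Trim the reservation around the aligned chunk, keeping one guard
      // page on each side, mirroring the new NewMapBeg/NewMapEnd math.
      uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
      uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
          PageSize) + PageSize;
      uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
      assert(NewMapBeg >= MapBeg && NewMapEnd <= MapEnd);
      printf("map [0x%lx, 0x%lx) user [0x%lx, 0x%lx)\n",
             NewMapBeg, NewMapEnd, UserBeg, UserEnd);
      // -> map [0x20002000, 0x20007000) user [0x20004000, 0x20006000)
    }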
@@ -84,7 +90,7 @@ class ScudoLargeMmapAllocator {
     // the guard pages.
     Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
     Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
-    CHECK(IsAligned(UserBeg, Alignment));
     return reinterpret_cast<void *>(UserBeg);
   }
@@ -173,6 +179,8 @@ class ScudoLargeMmapAllocator {
     return getHeader(reinterpret_cast<uptr>(Ptr));
   }

+  const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
+  const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
   uptr PageSize;
   atomic_uint8_t MayReturnNull;
 };