//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

// Secondary backed allocations are standalone chunks that contain extra
// information stored in a LargeChunk::Header prior to the frontend's header.
//
// The secondary takes care of alignment requirements (so that it can release
// unnecessary pages in the rare event of larger alignments), and as such must
// know about the frontend's header size.
//
// Since Windows doesn't support partial releasing of a reserved memory region,
// we have to keep track of both the reserved and the committed memory.
//
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space*      |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space**     |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
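//
// Purely as an illustration (assuming 4096-byte pages, minimal alignment, and
// 16 bytes for each of the two headers; all of these vary in practice): a
// Size of 20000 bytes gives a ReservedSize of 20016, rounded up to 20480,
// plus two guard pages for 28672 bytes reserved in total, with the user
// pointer starting 32 bytes into the first committed page.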

namespace LargeChunk {
struct Header {
  // Address range backing this chunk; kept so that Deallocate() can unmap it.
  ReservedAddressRange StoredRange;
  // Size of the committed (mapped) part of the range, used for the stats.
  uptr CommittedSize;
  // Bytes from the frontend's header to the committed end; this is what
  // GetActuallyAllocatedSize() reports.
  uptr Size;
};
constexpr uptr getHeaderSize() {
  return RoundUpTo(sizeof(Header), MinAlignment);
}
static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}
static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}
}  // namespace LargeChunk

class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - Chunk::getHeaderSize();
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr ReservedSize = Size + LargeChunk::getHeaderSize();
    if (UNLIKELY(Alignment > MinAlignment))
      ReservedSize += Alignment;
    const uptr PageSize = GetPageSizeCached();
    ReservedSize = RoundUpTo(ReservedSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    ReservedSize += 2 * PageSize;

    ReservedAddressRange AddressRange;
    uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
    if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
      return nullptr;
    // A page-aligned pointer is assumed after that, so check it now.
    DCHECK(IsAligned(ReservedBeg, PageSize));
    uptr ReservedEnd = ReservedBeg + ReservedSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr CommittedBeg = ReservedBeg + PageSize;
    uptr UserBeg = CommittedBeg + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
    uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes field of the header stay small.
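    // Note: since Alignment was already added to ReservedSize above, the
    // realigned UserBeg is guaranteed to still fit within the reserved range,
    // which is what the DCHECKs below verify.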
    if (UNLIKELY(Alignment > MinAlignment)) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
        const uptr NewReservedBeg = CommittedBeg - PageSize;
        DCHECK_GE(NewReservedBeg, ReservedBeg);
        if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
          AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
          ReservedBeg = NewReservedBeg;
        }
        UserEnd = UserBeg + UserSize;
        CommittedEnd = RoundUpTo(UserEnd, PageSize);
      }
      const uptr NewReservedEnd = CommittedEnd + PageSize;
      DCHECK_LE(NewReservedEnd, ReservedEnd);
      if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
        AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
        ReservedEnd = NewReservedEnd;
      }
    }

    DCHECK_LE(UserEnd, CommittedEnd);
    const uptr CommittedSize = CommittedEnd - CommittedBeg;
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    H->StoredRange = AddressRange;
    H->Size = CommittedEnd - Ptr;
    H->CommittedSize = CommittedSize;

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account for
    // the guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, CommittedSize);
      Stats->Add(AllocatorStatMapped, CommittedSize);
      AllocatedBytes += CommittedSize;
      if (LargestSize < CommittedSize)
        LargestSize = CommittedSize;
      NumberOfAllocs++;
    }

    return reinterpret_cast<void *>(Ptr);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    // Since we're about to unmap the entire region described by the stored
    // ReservedAddressRange (the header included), copy it onto the stack.
    ReservedAddressRange AddressRange = H->StoredRange;
    const uptr Size = H->CommittedSize;
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, Size);
      Stats->Sub(AllocatorStatMapped, Size);
      FreedBytes += Size;
      NumberOfFrees++;
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

  static uptr GetActuallyAllocatedSize(void *Ptr) {
    return LargeChunk::getHeader(Ptr)->Size;
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
           "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
  }

 private:
  static constexpr uptr HeadersSize =
      LargeChunk::getHeaderSize() + Chunk::getHeaderSize();

  StaticSpinMutex StatsMutex;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
};
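
// Illustrative usage sketch only (the identifiers below are hypothetical and
// not part of this file): the frontend passes a Size that already includes
// its own chunk header, and later frees with the pointer it got back, e.g.:
//   void *Ptr = Secondary.Allocate(&Stats, NeededSize, Alignment);
//   ...
//   Secondary.Deallocate(&Stats, Ptr);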

#endif  // SCUDO_ALLOCATOR_SECONDARY_H_