[GWP-ASan] Add aligned allocations.

Adds a new allocation API to GWP-ASan that handles size+alignment
restrictions.
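
For illustration, a minimal sketch of how a caller might use the new API (the
GPA instance and the example sizes are assumptions; the allocate/deallocate
signatures come from this change):

  // Default alignment is alignof(max_align_t).
  void *P = GPA.allocate(24);
  if (P)
    GPA.deallocate(P);

  // An explicit alignment must be a power of two, and the size/alignment
  // combination must fit in a guarded slot; otherwise nullptr is returned.
  void *Q = GPA.allocate(128, /* Alignment */ 64);
  if (Q)
    GPA.deallocate(Q);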

Differential Revision: https://reviews.llvm.org/D94830
Mitch Phillips 2021-01-14 15:16:56 -08:00
parent 32e98f05fe
commit 9dc0676247
12 changed files with 235 additions and 148 deletions


@ -11,7 +11,6 @@ set(GWP_ASAN_SOURCES
platform_specific/utilities_posix.cpp
guarded_pool_allocator.cpp
stack_trace_compressor.cpp
utilities.cpp
)
set(GWP_ASAN_HEADERS


@ -40,7 +40,7 @@ constexpr size_t AllocationMetadata::kMaxTraceLengthToCollect;
void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
size_t AllocSize) {
Addr = AllocAddr;
Size = AllocSize;
RequestedSize = AllocSize;
IsDeallocated = false;
AllocationTrace.ThreadID = getThreadID();


@ -49,7 +49,7 @@ struct AllocationMetadata {
static constexpr size_t kMaxTraceLengthToCollect = 128;
// Records the given allocation metadata into this struct.
void RecordAllocation(uintptr_t Addr, size_t Size);
void RecordAllocation(uintptr_t Addr, size_t RequestedSize);
// Record that this allocation is now deallocated.
void RecordDeallocation();
@ -70,7 +70,7 @@ struct AllocationMetadata {
// valid, as the allocation has never occurred.
uintptr_t Addr = 0;
// Represents the actual size of the allocation.
size_t Size = 0;
size_t RequestedSize = 0;
CallSiteInfo AllocationTrace;
CallSiteInfo DeallocationTrace;


@ -103,7 +103,7 @@ uintptr_t __gwp_asan_get_allocation_address(
size_t __gwp_asan_get_allocation_size(
const gwp_asan::AllocationMetadata *AllocationMeta) {
return AllocationMeta->Size;
return AllocationMeta->RequestedSize;
}
uint64_t __gwp_asan_get_allocation_thread_id(


@ -12,6 +12,7 @@
#include "gwp_asan/utilities.h"
#include <assert.h>
#include <stddef.h>
using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;
@ -32,6 +33,8 @@ size_t roundUpTo(size_t Size, size_t Boundary) {
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
return Ptr & ~(PageSize - 1);
}
bool isPowerOfTwo(uintptr_t x) { return (x & (x - 1)) == 0; }
} // anonymous namespace
// Gets the singleton implementation of this class. Thread-compatible until
@ -63,8 +66,6 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
assert((PageSize & (PageSize - 1)) == 0);
State.PageSize = PageSize;
PerfectlyRightAlign = Opts.PerfectlyRightAlign;
size_t PoolBytesRequired =
PageSize * (1 + State.MaxSimultaneousAllocations) +
State.MaxSimultaneousAllocations * State.maximumAllocationSize();
@ -113,7 +114,7 @@ void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
const AllocationMetadata &Meta = Metadata[i];
if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
Meta.Addr < Start + Size)
Cb(Meta.Addr, Meta.Size, Arg);
Cb(Meta.Addr, Meta.RequestedSize, Arg);
}
}
@ -138,7 +139,39 @@ void GuardedPoolAllocator::uninitTestOnly() {
*getThreadLocals() = ThreadLocalPackedVariables();
}
void *GuardedPoolAllocator::allocate(size_t Size) {
// Note, minimum backing allocation size in GWP-ASan is always one page, and
// each slot could potentially be multiple pages (but always in
// page-increments). Thus, for anything that requires less than page size
// alignment, we don't need to allocate extra padding to ensure the alignment
// can be met.
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
size_t Alignment,
size_t PageSize) {
assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
assert(Alignment != 0 && "Alignment should be non-zero");
assert(Size != 0 && "Size should be non-zero");
if (Alignment <= PageSize)
return Size;
return Size + Alignment - PageSize;
}
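
As a worked example of the formula above (values chosen for illustration, using
the 0x1000-byte page size assumed by the tests later in this change):

  // Alignment <= PageSize: the slot start is already page-aligned, so no
  // extra padding is required:
  //   getRequiredBackingSize(/* Size */ 0x20, /* Alignment */ 0x10, 0x1000) == 0x20
  // Alignment > PageSize: pad so that an aligned address is guaranteed to
  // exist somewhere inside the backing region:
  //   getRequiredBackingSize(/* Size */ 0x1, /* Alignment */ 0x4000, 0x1000)
  //       == 0x1 + 0x4000 - 0x1000 == 0x3001
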
uintptr_t GuardedPoolAllocator::getAlignedPtr(uintptr_t Ptr, size_t Alignment,
bool IsRightAligned) {
assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
assert(Alignment != 0 && "Alignment should be non-zero");
if ((Ptr & (Alignment - 1)) == 0)
return Ptr;
if (IsRightAligned)
Ptr -= Ptr & (Alignment - 1);
else
Ptr += Alignment - (Ptr & (Alignment - 1));
return Ptr;
}
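
To illustrate both directions with example values (16-byte alignment assumed):

  // Left-aligned: round the pointer up to the next aligned address.
  //   getAlignedPtr(0x4008, 0x10, /* IsRightAligned */ false) == 0x4010
  // Right-aligned: round the pointer down; the caller has already subtracted
  // the allocation size from the slot end.
  //   getAlignedPtr(0x4FD8, 0x10, /* IsRightAligned */ true) == 0x4FD0
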
void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
// GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
// back to the supporting allocator.
if (State.GuardedPagePoolEnd == 0) {
@ -148,14 +181,24 @@ void *GuardedPoolAllocator::allocate(size_t Size) {
return nullptr;
}
if (Size == 0)
Size = 1;
if (Alignment == 0)
Alignment = 1;
if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
Size > State.maximumAllocationSize())
return nullptr;
size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
if (BackingSize > State.maximumAllocationSize())
return nullptr;
// Protect against recursivity.
if (getThreadLocals()->RecursiveGuard)
return nullptr;
ScopedRecursiveGuard SRG;
if (Size == 0 || Size > State.maximumAllocationSize())
return nullptr;
size_t Index;
{
ScopedLock L(PoolMutex);
@ -165,28 +208,33 @@ void *GuardedPoolAllocator::allocate(size_t Size) {
if (Index == kInvalidSlotID)
return nullptr;
uintptr_t Ptr = State.slotToAddr(Index);
// Should we right-align this allocation?
if (getRandomUnsigned32() % 2 == 0) {
AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
if (PerfectlyRightAlign)
Align = AlignmentStrategy::PERFECT;
Ptr +=
State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
}
AllocationMetadata *Meta = addrToMetadata(Ptr);
uintptr_t SlotStart = State.slotToAddr(Index);
AllocationMetadata *Meta = addrToMetadata(SlotStart);
uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
uintptr_t UserPtr;
// Randomly choose whether to left-align or right-align the allocation, and
// then apply the necessary adjustments to get an aligned pointer.
if (getRandomUnsigned32() % 2 == 0)
UserPtr = getAlignedPtr(SlotStart, Alignment, /* IsRightAligned */ false);
else
UserPtr =
getAlignedPtr(SlotEnd - Size, Alignment, /* IsRightAligned */ true);
assert(UserPtr >= SlotStart);
assert(UserPtr + Size <= SlotEnd);
// If a slot is multiple pages in size, and the allocation takes up a single
// page, we can improve overflow detection by leaving the unused pages as
// unmapped.
const size_t PageSize = State.PageSize;
allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
roundUpTo(Size, PageSize));
allocateInGuardedPool(
reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
roundUpTo(Size, PageSize));
Meta->RecordAllocation(Ptr, Size);
Meta->RecordAllocation(UserPtr, Size);
Meta->AllocationTrace.RecordBacktrace(Backtrace);
return reinterpret_cast<void *>(Ptr);
return reinterpret_cast<void *>(UserPtr);
}
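
To make the randomized placement concrete (a sketch with example values: a
single-page slot spanning 0x4000-0x5000, Size = 0x28, Alignment = 0x10):

  // Left-aligned: UserPtr = getAlignedPtr(0x4000, 0x10, false) == 0x4000, so
  // underflows land directly on the guard page below the slot.
  // Right-aligned: UserPtr = getAlignedPtr(0x5000 - 0x28, 0x10, true) == 0x4FD0,
  // leaving at most Alignment - 1 bytes of slack before the guard page above,
  // so most overflows still trap immediately.
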
void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
@ -250,7 +298,7 @@ size_t GuardedPoolAllocator::getSize(const void *Ptr) {
ScopedLock L(PoolMutex);
AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
return Meta->Size;
return Meta->RequestedSize;
}
AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {


@ -19,6 +19,7 @@
#include <stddef.h>
#include <stdint.h>
// IWYU pragma: no_include <__stddef_max_align_t.h>
namespace gwp_asan {
// This class is the primary implementation of the allocator portion of GWP-
@ -93,10 +94,13 @@ public:
return State.pointerIsMine(Ptr);
}
// Allocate memory in a guarded slot, and return a pointer to the new
// allocation. Returns nullptr if the pool is empty, the requested size is too
// large for this pool to handle, or the requested size is zero.
void *allocate(size_t Size);
// Allocate memory in a guarded slot, with the specified `Alignment`. Returns
// nullptr if the pool is empty, if the alignment is not a power of two, or
// if the size/alignment makes the allocation too large for this pool to
// handle. By default, uses strong alignment (i.e. `max_align_t`), see
// http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for discussion of
// alignment issues in the standard.
void *allocate(size_t Size, size_t Alignment = alignof(max_align_t));
// Deallocate memory in a guarded slot. The provided pointer must have been
// allocated using this pool. This will set the guarded slot as inaccessible.
@ -111,6 +115,18 @@ public:
// Returns a pointer to the AllocatorState region.
const AllocatorState *getAllocatorState() const { return &State; }
// Exposed as protected for testing.
protected:
// Returns the actual allocation size required to service an allocation with
// the provided Size and Alignment.
static size_t getRequiredBackingSize(size_t Size, size_t Alignment,
size_t PageSize);
// Returns the provided pointer, adjusted up or down to meet the specified
// alignment, depending on whether the allocation is left- or right-aligned.
static uintptr_t getAlignedPtr(uintptr_t Ptr, size_t Alignment,
bool IsRightAligned);
private:
// Name of actively-occupied slot mappings.
static constexpr const char *kGwpAsanAliveSlotName = "GWP-ASan Alive Slot";


@ -23,16 +23,6 @@ GWP_ASAN_OPTION(bool, Enabled, GWP_ASAN_DEFAULT_ENABLED,
"Is GWP-ASan enabled? Defaults to " GWP_ASAN_STRINGIFY(
GWP_ASAN_DEFAULT_ENABLED) ".")
GWP_ASAN_OPTION(
bool, PerfectlyRightAlign, false,
"When allocations are right-aligned, should we perfectly align them up to "
"the page boundary? By default (false), we round up allocation size to the "
"nearest power of two (1, 2, 4, 8, 16) up to a maximum of 16-byte "
"alignment for performance reasons. For Bionic, we use 8-byte alignment by "
"default. Setting this to true can find single byte buffer-overflows for "
"multibyte allocations at the cost of performance, and may be incompatible "
"with some architectures.")
GWP_ASAN_OPTION(int, MaxSimultaneousAllocations, 16,
"Number of simultaneously-guarded allocations available in the "
"pool. Defaults to 16.")


@ -6,41 +6,119 @@
//
//===----------------------------------------------------------------------===//
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/tests/harness.h"
#include "gwp_asan/utilities.h"
#include <vector>
TEST(AlignmentTest, PowerOfTwo) {
std::vector<std::pair<size_t, size_t>> AskedSizeToAlignedSize = {
{1, 1}, {2, 2}, {3, 4}, {4, 4}, {5, 8}, {7, 8},
{8, 8}, {9, 16}, {15, 16}, {16, 16}, {17, 32}, {31, 32},
{32, 32}, {33, 48}, {4095, 4096}, {4096, 4096},
};
for (const auto &KV : AskedSizeToAlignedSize) {
EXPECT_EQ(KV.second,
gwp_asan::rightAlignedAllocationSize(
KV.first, gwp_asan::AlignmentStrategy::POWER_OF_TWO));
class AlignmentTestGPA : public gwp_asan::GuardedPoolAllocator {
public:
static size_t getRequiredBackingSize(size_t Size, size_t Alignment,
size_t PageSize) {
return GuardedPoolAllocator::getRequiredBackingSize(Size, Alignment,
PageSize);
}
static uintptr_t getAlignedPtr(uintptr_t Ptr, size_t Alignment,
bool IsRightAligned) {
return GuardedPoolAllocator::getAlignedPtr(Ptr, Alignment, IsRightAligned);
}
};
// Global assumptions for these tests:
// 1. Page size is 0x1000.
// 2. All tests assume a slot is multipage, between 0x4000 and 0x8000. While we
// don't use multipage slots right now, this tests more boundary conditions
// and allows us to add this feature at a later date without rewriting the
// alignment functionality.
// These aren't actual requirements of the allocator; they just simplify the
// numerics of the testing.
TEST(AlignmentTest, LeftAlignedAllocs) {
// Alignment < Page Size.
EXPECT_EQ(0x4000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x4000, /* Alignment */ 0x1,
/* IsRightAligned */ false));
// Alignment == Page Size.
EXPECT_EQ(0x4000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x4000, /* Alignment */ 0x1000,
/* IsRightAligned */ false));
// Alignment > Page Size.
EXPECT_EQ(0x4000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x4000, /* Alignment */ 0x4000,
/* IsRightAligned */ false));
}
TEST(AlignmentTest, AlignBionic) {
std::vector<std::pair<size_t, size_t>> AskedSizeToAlignedSize = {
{1, 8}, {2, 8}, {3, 8}, {4, 8}, {5, 8}, {7, 8},
{8, 8}, {9, 16}, {15, 16}, {16, 16}, {17, 24}, {31, 32},
{32, 32}, {33, 40}, {4095, 4096}, {4096, 4096},
};
TEST(AlignmentTest, SingleByteAllocs) {
// Alignment < Page Size.
EXPECT_EQ(0x1,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1, /* Alignment */ 0x1, /* PageSize */ 0x1000));
EXPECT_EQ(0x7fff, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x1,
/* IsRightAligned */ true));
for (const auto &KV : AskedSizeToAlignedSize) {
EXPECT_EQ(KV.second, gwp_asan::rightAlignedAllocationSize(
KV.first, gwp_asan::AlignmentStrategy::BIONIC));
}
// Alignment == Page Size.
EXPECT_EQ(0x1,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1, /* Alignment */ 0x1000, /* PageSize */ 0x1000));
EXPECT_EQ(0x7000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x1000,
/* IsRightAligned */ true));
// Alignment > Page Size.
EXPECT_EQ(0x3001,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1, /* Alignment */ 0x4000, /* PageSize */ 0x1000));
EXPECT_EQ(0x4000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x4000,
/* IsRightAligned */ true));
}
TEST(AlignmentTest, PerfectAlignment) {
for (size_t i = 1; i <= 4096; ++i) {
EXPECT_EQ(i, gwp_asan::rightAlignedAllocationSize(
i, gwp_asan::AlignmentStrategy::PERFECT));
}
TEST(AlignmentTest, PageSizedAllocs) {
// Alignment < Page Size.
EXPECT_EQ(0x1000,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1000, /* Alignment */ 0x1, /* PageSize */ 0x1000));
EXPECT_EQ(0x7000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x1,
/* IsRightAligned */ true));
// Alignment == Page Size.
EXPECT_EQ(0x1000, AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1000, /* Alignment */ 0x1000,
/* PageSize */ 0x1000));
EXPECT_EQ(0x7000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x1000,
/* IsRightAligned */ true));
// Alignment > Page Size.
EXPECT_EQ(0x4000, AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1000, /* Alignment */ 0x4000,
/* PageSize */ 0x1000));
EXPECT_EQ(0x4000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x4000,
/* IsRightAligned */ true));
}
TEST(AlignmentTest, MoreThanPageAllocs) {
// Alignment < Page Size.
EXPECT_EQ(0x2fff,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x2fff, /* Alignment */ 0x1, /* PageSize */ 0x1000));
EXPECT_EQ(0x5001, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x1,
/* IsRightAligned */ true));
// Alignment == Page Size.
EXPECT_EQ(0x2fff, AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x2fff, /* Alignment */ 0x1000,
/* PageSize */ 0x1000));
EXPECT_EQ(0x5000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x1000,
/* IsRightAligned */ true));
// Alignment > Page Size.
EXPECT_EQ(0x5fff, AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x2fff, /* Alignment */ 0x4000,
/* PageSize */ 0x1000));
EXPECT_EQ(0x4000, AlignmentTestGPA::getAlignedPtr(
/* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x4000,
/* IsRightAligned */ true));
}


@ -39,6 +39,37 @@ TEST_F(CustomGuardedPoolAllocator, SizedAllocations) {
TEST_F(DefaultGuardedPoolAllocator, TooLargeAllocation) {
EXPECT_EQ(nullptr,
GPA.allocate(GPA.getAllocatorState()->maximumAllocationSize() + 1));
EXPECT_EQ(nullptr, GPA.allocate(SIZE_MAX, 0));
EXPECT_EQ(nullptr, GPA.allocate(SIZE_MAX, 1));
EXPECT_EQ(nullptr, GPA.allocate(0, SIZE_MAX / 2));
EXPECT_EQ(nullptr, GPA.allocate(1, SIZE_MAX / 2));
EXPECT_EQ(nullptr, GPA.allocate(SIZE_MAX, SIZE_MAX / 2));
}
TEST_F(DefaultGuardedPoolAllocator, ZeroSizeAndAlignmentAllocations) {
void *P;
EXPECT_NE(nullptr, (P = GPA.allocate(0, 0)));
GPA.deallocate(P);
EXPECT_NE(nullptr, (P = GPA.allocate(1, 0)));
GPA.deallocate(P);
EXPECT_NE(nullptr, (P = GPA.allocate(0, 1)));
GPA.deallocate(P);
}
TEST_F(DefaultGuardedPoolAllocator, NonPowerOfTwoAlignment) {
EXPECT_EQ(nullptr, GPA.allocate(0, 3));
EXPECT_EQ(nullptr, GPA.allocate(1, 3));
EXPECT_EQ(nullptr, GPA.allocate(0, SIZE_MAX));
EXPECT_EQ(nullptr, GPA.allocate(1, SIZE_MAX));
}
// Added multi-page slots? You'll need to expand this test.
TEST_F(DefaultGuardedPoolAllocator, TooBigForSinglePageSlots) {
EXPECT_EQ(nullptr, GPA.allocate(0x1001, 0));
EXPECT_EQ(nullptr, GPA.allocate(0x1001, 1));
EXPECT_EQ(nullptr, GPA.allocate(0x1001, 0x1000));
EXPECT_EQ(nullptr, GPA.allocate(1, 0x2000));
EXPECT_EQ(nullptr, GPA.allocate(0, 0x2000));
}
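
A check that could complement the tests above is verifying that returned
pointers actually honor the requested alignment. A sketch (this test is not
part of the diff shown here; the name and the alignment bound are assumptions):

TEST_F(DefaultGuardedPoolAllocator, ReturnedPointersAreAligned) {
  for (size_t Alignment = 1; Alignment <= 0x1000; Alignment <<= 1) {
    void *P = GPA.allocate(1, Alignment);
    EXPECT_NE(nullptr, P);
    // The returned address must be a multiple of the requested alignment.
    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(P) % Alignment);
    GPA.deallocate(P);
  }
}
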
TEST_F(CustomGuardedPoolAllocator, AllocAllSlots) {


@ -29,7 +29,7 @@ protected:
size_t Slot = State.getNearestSlot(Addr);
Metadata[Slot].Addr = Addr;
Metadata[Slot].Size = Size;
Metadata[Slot].RequestedSize = Size;
Metadata[Slot].IsDeallocated = IsDeallocated;
Metadata[Slot].AllocationTrace.ThreadID = 123;
Metadata[Slot].DeallocationTrace.ThreadID = 321;
@ -80,7 +80,8 @@ protected:
__gwp_asan_get_metadata(&State, Metadata, ErrorPtr);
EXPECT_NE(nullptr, Meta);
EXPECT_EQ(Metadata[Index].Addr, __gwp_asan_get_allocation_address(Meta));
EXPECT_EQ(Metadata[Index].Size, __gwp_asan_get_allocation_size(Meta));
EXPECT_EQ(Metadata[Index].RequestedSize,
__gwp_asan_get_allocation_size(Meta));
EXPECT_EQ(Metadata[Index].AllocationTrace.ThreadID,
__gwp_asan_get_allocation_thread_id(Meta));


@ -1,63 +0,0 @@
//===-- utilities.cpp -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "gwp_asan/utilities.h"
#include <assert.h>
namespace gwp_asan {
// See `bionic/tests/malloc_test.cpp` in the Android source for documentation
// regarding their alignment guarantees. We always round up to the closest
// 8-byte window. As GWP-ASan's malloc(X) can always get exactly an X-sized
// allocation, an allocation that rounds up to 16-bytes will always be given a
// 16-byte aligned allocation.
static size_t alignBionic(size_t RealAllocationSize) {
if (RealAllocationSize % 8 == 0)
return RealAllocationSize;
return RealAllocationSize + 8 - (RealAllocationSize % 8);
}
static size_t alignPowerOfTwo(size_t RealAllocationSize) {
if (RealAllocationSize <= 2)
return RealAllocationSize;
if (RealAllocationSize <= 4)
return 4;
if (RealAllocationSize <= 8)
return 8;
if (RealAllocationSize % 16 == 0)
return RealAllocationSize;
return RealAllocationSize + 16 - (RealAllocationSize % 16);
}
#ifdef __BIONIC__
static constexpr AlignmentStrategy PlatformDefaultAlignment =
AlignmentStrategy::BIONIC;
#else // __BIONIC__
static constexpr AlignmentStrategy PlatformDefaultAlignment =
AlignmentStrategy::POWER_OF_TWO;
#endif // __BIONIC__
size_t rightAlignedAllocationSize(size_t RealAllocationSize,
AlignmentStrategy Align) {
assert(RealAllocationSize > 0);
if (Align == AlignmentStrategy::DEFAULT)
Align = PlatformDefaultAlignment;
switch (Align) {
case AlignmentStrategy::BIONIC:
return alignBionic(RealAllocationSize);
case AlignmentStrategy::POWER_OF_TWO:
return alignPowerOfTwo(RealAllocationSize);
case AlignmentStrategy::PERFECT:
return RealAllocationSize;
case AlignmentStrategy::DEFAULT:
__builtin_unreachable();
}
__builtin_unreachable();
}
} // namespace gwp_asan


@ -23,19 +23,6 @@ GWP_ASAN_ALWAYS_INLINE void Check(bool Condition, const char *Message) {
return;
die(Message);
}
enum class AlignmentStrategy {
// Default => POWER_OF_TWO on most platforms, BIONIC for Android Bionic.
DEFAULT,
POWER_OF_TWO,
BIONIC,
PERFECT,
};
// Returns the real size of a right-aligned allocation.
size_t rightAlignedAllocationSize(
size_t RealAllocationSize,
AlignmentStrategy Align = AlignmentStrategy::DEFAULT);
} // namespace gwp_asan
#endif // GWP_ASAN_UTILITIES_H_