scudo: Replace a couple of macros with their expansions.

The macros INLINE and COMPILER_CHECK always expand to the same thing (inline
and static_assert, respectively). Both expansions are standards-compliant C++
and are used consistently throughout the rest of LLVM, so let's replace the
macros with their expansions for consistency.

Differential Revision: https://reviews.llvm.org/D70793
Peter Collingbourne 2019-11-27 09:35:47 -08:00
parent f30fe16d49
commit 6fd6cfdf72
16 changed files with 64 additions and 67 deletions
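
For reference, the pattern applied throughout this patch looks like the following sketch. The two #define lines are the ones removed from internal_defs.h further down; the helper function and the assert condition are made-up examples, not code from the patch.

  // Before: the scudo-local macro spellings.
  #define INLINE inline
  #define COMPILER_CHECK(Pred) static_assert(Pred, "")

  INLINE uptr exampleHelper(uptr X) { return X + 1; } // hypothetical helper
  COMPILER_CHECK(sizeof(uptr) >= 4);

  // After: the plain standard C++ spellings, with the macros gone.
  inline uptr exampleHelper(uptr X) { return X + 1; }
  static_assert(sizeof(uptr) >= 4, "");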

View File

@@ -21,12 +21,12 @@ enum memory_order {
 memory_order_acq_rel = 4,
 memory_order_seq_cst = 5
 };
-COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
-COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
-COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
-COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
-COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
-COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
 struct atomic_u8 {
 typedef u8 Type;
@@ -60,7 +60,7 @@ struct atomic_uptr {
 };
 template <typename T>
-INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
 typename T::Type V;
 __atomic_load(&A->ValDoNotUse, &V, MO);
@@ -68,29 +68,29 @@ INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
 }
 template <typename T>
-INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
 __atomic_store(&A->ValDoNotUse, &V, MO);
 }
-INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
 template <typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
 memory_order MO) {
 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
 return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
 }
 template <typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
 memory_order MO) {
 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
 return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
 }
 template <typename T>
-INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
 memory_order MO) {
 DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
 typename T::Type R;
@@ -99,7 +99,7 @@ INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
 }
 template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
 typename T::Type Xchg,
 memory_order MO) {
 return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
@@ -107,7 +107,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
 }
 template <typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
 typename T::Type Xchg,
 memory_order MO) {
 return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
@@ -117,17 +117,17 @@ INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
 // Clutter-reducing helpers.
 template <typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+inline typename T::Type atomic_load_relaxed(const volatile T *A) {
 return atomic_load(A, memory_order_relaxed);
 }
 template <typename T>
-INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
 atomic_store(A, V, memory_order_relaxed);
 }
 template <typename T>
-INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+inline typename T::Type atomic_compare_exchange(volatile T *A,
 typename T::Type Cmp,
 typename T::Type Xchg) {
 atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
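
As a usage sketch of the helpers above (hypothetical caller code, not part of this patch), a statistics counter bumped from multiple threads could be written as:

  // Hypothetical example; relies only on the declarations shown in this header.
  struct ExampleCounter {
    atomic_uptr Allocated;
    void add(uptr Size) {
      // A relaxed read-modify-write is sufficient for a statistics counter.
      atomic_fetch_add(&Allocated, Size, memory_order_relaxed);
    }
    uptr get() const { return atomic_load_relaxed(&Allocated); }
  };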

View File

@@ -37,7 +37,7 @@ enum class Checksum : u8 {
 // significantly on memory accesses, as well as 1K of CRC32 table, on platforms
 // that do no support hardware CRC32. The checksum itself is 16-bit, which is at
 // odds with CRC32, but enough for our needs.
-INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
 for (u8 I = 0; I < sizeof(Data); I++) {
 Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
 Sum = static_cast<u16>(Sum + (Data & 0xff));
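
The hunk above is cut off before the end of the loop. Filling in the rest for readability (the shift of Data and the final return are inferred from the comment and the loop, not shown in the diff), the rotate-and-add BSD checksum reads:

  inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
    for (u8 I = 0; I < sizeof(Data); I++) {
      // Rotate the 16-bit sum right by one bit.
      Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
      // Add the low byte, then advance to the next byte of Data (inferred).
      Sum = static_cast<u16>(Sum + (Data & 0xff));
      Data >>= 8;
    }
    return Sum;
  }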

View File

@@ -20,7 +20,7 @@ namespace scudo {
 extern Checksum HashAlgorithm;
-INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
 // If the hardware CRC32 feature is defined here, it was enabled everywhere,
 // as opposed to only for crc32_hw.cpp. This means that other hardware
 // specific instructions were likely emitted at other places, and as a result
@@ -71,7 +71,7 @@ struct UnpackedHeader {
 uptr Checksum : 16;
 };
 typedef atomic_u64 AtomicPackedHeader;
-COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");
 // Those constants are required to silence some -Werror=conversion errors when
 // assigning values to the related bitfield variables.
@@ -86,12 +86,12 @@ constexpr uptr getHeaderSize() {
 return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
 }
-INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
 return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
 getHeaderSize());
 }
-INLINE
+inline
 const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
 return reinterpret_cast<const AtomicPackedHeader *>(
 reinterpret_cast<uptr>(Ptr) - getHeaderSize());
@@ -100,7 +100,7 @@ const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
 // We do not need a cryptographically strong hash for the checksum, but a CRC
 // type function that can alert us in the event a header is invalid or
 // corrupted. Ideally slightly better than a simple xor of all fields.
-static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
 UnpackedHeader *Header) {
 UnpackedHeader ZeroChecksumHeader = *Header;
 ZeroChecksumHeader.Checksum = 0;
@@ -110,7 +110,7 @@ static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
 ARRAY_SIZE(HeaderHolder));
 }
-INLINE void storeHeader(u32 Cookie, void *Ptr,
+inline void storeHeader(u32 Cookie, void *Ptr,
 UnpackedHeader *NewUnpackedHeader) {
 NewUnpackedHeader->Checksum =
 computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
@@ -118,7 +118,7 @@ INLINE void storeHeader(u32 Cookie, void *Ptr,
 atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
 }
-INLINE
+inline
 void loadHeader(u32 Cookie, const void *Ptr,
 UnpackedHeader *NewUnpackedHeader) {
 PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
@@ -128,7 +128,7 @@ void loadHeader(u32 Cookie, const void *Ptr,
 reportHeaderCorruption(const_cast<void *>(Ptr));
 }
-INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+inline void compareExchangeHeader(u32 Cookie, void *Ptr,
 UnpackedHeader *NewUnpackedHeader,
 UnpackedHeader *OldUnpackedHeader) {
 NewUnpackedHeader->Checksum =
@@ -141,7 +141,7 @@ INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
 reportHeaderRace(Ptr);
 }
-INLINE
+inline
 bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
 PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
 *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
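
The isValid() hunk is truncated above; presumably it finishes by recomputing the checksum over the freshly loaded header and comparing it with the stored value, along these lines (a sketch of the likely tail, not shown in the diff; the Sketch suffix marks the name as a reconstruction):

  // Reconstruction for readability only: a header is considered valid when its
  // stored checksum matches the one recomputed over its other fields.
  inline bool isValidSketch(u32 Cookie, const void *Ptr,
                            UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return NewUnpackedHeader->Checksum ==
           computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
  }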

View File

@@ -184,7 +184,7 @@ public:
 ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
 // Takes care of extravagantly large sizes as well as integer overflows.
-COMPILER_CHECK(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment);
+static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
 if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
 if (Options.MayReturnNull)
 return nullptr;
@@ -523,7 +523,7 @@ private:
 reportSanityCheckError("class ID");
 }
-static INLINE void *getBlockBegin(const void *Ptr,
+static inline void *getBlockBegin(const void *Ptr,
 Chunk::UnpackedHeader *Header) {
 return reinterpret_cast<void *>(
 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
@@ -531,7 +531,7 @@ private:
 }
 // Return the size of a chunk as requested during its allocation.
-INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
 if (LIKELY(Header->ClassId))
 return SizeOrUnusedBytes;

View File

@@ -19,22 +19,22 @@
 namespace scudo {
-template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
-COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
+static_assert(sizeof(Dest) == sizeof(Source), "");
 Dest D;
 memcpy(&D, &S, sizeof(D));
 return D;
 }
-INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
 return (X + Boundary - 1) & ~(Boundary - 1);
 }
-INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
 return X & ~(Boundary - 1);
 }
-INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+inline constexpr bool isAligned(uptr X, uptr Alignment) {
 return (X & (Alignment - 1)) == 0;
 }
@@ -48,14 +48,14 @@ template <class T> void Swap(T &A, T &B) {
 B = Tmp;
 }
-INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+inline uptr getMostSignificantSetBitIndex(uptr X) {
 DCHECK_NE(X, 0U);
 return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
 }
-INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpToPowerOfTwo(uptr Size) {
 DCHECK(Size);
 if (isPowerOfTwo(Size))
 return Size;
@@ -65,17 +65,17 @@ INLINE uptr roundUpToPowerOfTwo(uptr Size) {
 return 1UL << (Up + 1);
 }
-INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+inline uptr getLeastSignificantSetBitIndex(uptr X) {
 DCHECK_NE(X, 0U);
 return static_cast<uptr>(__builtin_ctzl(X));
 }
-INLINE uptr getLog2(uptr X) {
+inline uptr getLog2(uptr X) {
 DCHECK(isPowerOfTwo(X));
 return getLeastSignificantSetBitIndex(X);
 }
-INLINE u32 getRandomU32(u32 *State) {
+inline u32 getRandomU32(u32 *State) {
 // ANSI C linear congruential PRNG (16-bit output).
 // return (*State = *State * 1103515245 + 12345) >> 16;
 // XorShift (32-bit output).
@@ -85,11 +85,11 @@ INLINE u32 getRandomU32(u32 *State) {
 return *State;
 }
-INLINE u32 getRandomModN(u32 *State, u32 N) {
+inline u32 getRandomModN(u32 *State, u32 N) {
 return getRandomU32(State) % N; // [0, N)
 }
-template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
 if (N <= 1)
 return;
 u32 State = *RandState;
@@ -100,7 +100,7 @@ template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
 // Hardware specific inlinable functions.
-INLINE void yieldProcessor(u8 Count) {
+inline void yieldProcessor(u8 Count) {
 #if defined(__i386__) || defined(__x86_64__)
 __asm__ __volatile__("" ::: "memory");
 for (u8 I = 0; I < Count; I++)
@@ -117,7 +117,7 @@ INLINE void yieldProcessor(u8 Count) {
 extern uptr PageSizeCached;
 uptr getPageSizeSlow();
-INLINE uptr getPageSizeCached() {
+inline uptr getPageSizeCached() {
 // Bionic uses a hardcoded value.
 if (SCUDO_ANDROID)
 return 4096U;
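
The body of getRandomU32() is cut off right after the XorShift comment. A standard Marsaglia XorShift32 step, which is what that comment describes, looks like this (a sketch; the function name is made up and the exact shift constants used by scudo are not visible in the hunk):

  // Standard XorShift32 step; State must be seeded with a non-zero value.
  inline u32 xorShift32Step(u32 *State) {
    *State ^= *State << 13;
    *State ^= *State >> 17;
    *State ^= *State << 5;
    return *State;
  }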

View File

@@ -108,7 +108,7 @@ void FlagParser::parseString(const char *S) {
 Pos = OldPos;
 }
-INLINE bool parseBool(const char *Value, bool *b) {
+inline bool parseBool(const char *Value, bool *b) {
 if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
 strncmp(Value, "false", 5) == 0) {
 *b = false;

View File

@@ -29,7 +29,7 @@ void NORETURN die() { __builtin_trap(); }
 // We zero-initialize the Extra parameter of map(), make sure this is consistent
 // with ZX_HANDLE_INVALID.
-COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+static_assert(ZX_HANDLE_INVALID == 0, "");
 static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
 // Only scenario so far.
@@ -171,7 +171,7 @@ u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
 u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
 bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
-COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
 if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
 return false;
 _zx_cprng_draw(Buffer, Length);

View File

@@ -30,7 +30,6 @@
 #define INTERFACE __attribute__((visibility("default")))
 #define WEAK __attribute__((weak))
-#define INLINE inline
 #define ALWAYS_INLINE inline __attribute__((always_inline))
 #define ALIAS(X) __attribute__((alias(X)))
 // Please only use the ALIGNED macro before the type. Using ALIGNED after the
@@ -126,8 +125,6 @@ void NORETURN reportCheckFailed(const char *File, int Line,
 die(); \
 } while (0)
-#define COMPILER_CHECK(Pred) static_assert(Pred, "")
 } // namespace scudo
 #endif // SCUDO_INTERNAL_DEFS_H_

View File

@@ -42,7 +42,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
 public:
 typedef SizeClassMapT SizeClassMap;
 // Regions should be large enough to hold the largest Block.
-COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
 typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
 typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
 typedef typename CacheT::TransferBatch TransferBatch;
@@ -204,7 +204,7 @@ private:
 uptr AllocatedUser;
 ReleaseToOsInfo ReleaseInfo;
 };
-COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 uptr computeRegionId(uptr Mem) {
 const uptr Id = Mem >> RegionSizeLog;

View File

@@ -215,7 +215,7 @@ private:
 MapPlatformData Data;
 ReleaseToOsInfo ReleaseInfo;
 };
-COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 uptr PrimaryBase;
 RegionInfo *RegionInfoArray;

View File

@@ -59,7 +59,7 @@ struct QuarantineBatch {
 void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
 };
-COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8Kb.
+static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
 // Per-thread cache of memory blocks.
 template <typename Callback> class QuarantineCache {

View File

@@ -34,7 +34,7 @@ private:
 ScopedString Message;
 };
-INLINE void NORETURN trap() { __builtin_trap(); }
+inline void NORETURN trap() { __builtin_trap(); }
 // This could potentially be called recursively if a CHECK fails in the reports.
 void NORETURN reportCheckFailed(const char *File, int Line,

View File

@@ -52,7 +52,7 @@ template <uptr MaxFreeListSize = 32U> class MapAllocator {
 public:
 // Ensure the freelist is disabled on Fuchsia, since it doesn't support
 // releasing Secondary blocks yet.
-COMPILER_CHECK(!SCUDO_FUCHSIA || MaxFreeListSize == 0U);
+static_assert(!SCUDO_FUCHSIA || MaxFreeListSize == 0U, "");
 void initLinkerInitialized(GlobalStats *S) {
 Stats.initLinkerInitialized();

View File

@@ -49,7 +49,7 @@ public:
 static const uptr MaxSize = 1UL << MaxSizeLog;
 static const uptr NumClasses =
 MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
-COMPILER_CHECK(NumClasses <= 256);
+static_assert(NumClasses <= 256, "");
 static const uptr LargestClassId = NumClasses - 1;
 static const uptr BatchClassId = 0;

View File

@@ -38,7 +38,7 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
 void commitBack(Allocator *Instance) { Instance->commitBack(this); }
-INLINE bool tryLock() {
+inline bool tryLock() {
 if (Mutex.tryLock()) {
 atomic_store_relaxed(&Precedence, 0);
 return true;
@@ -49,12 +49,12 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
 static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
 return false;
 }
-INLINE void lock() {
+inline void lock() {
 atomic_store_relaxed(&Precedence, 0);
 Mutex.lock();
 }
-INLINE void unlock() { Mutex.unlock(); }
-INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+inline void unlock() { Mutex.unlock(); }
+inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 private:
 HybridMutex Mutex;

View File

@@ -20,7 +20,7 @@
 namespace scudo {
 // A common errno setting logic shared by almost all Scudo C wrappers.
-INLINE void *setErrnoOnNull(void *Ptr) {
+inline void *setErrnoOnNull(void *Ptr) {
 if (UNLIKELY(!Ptr))
 errno = ENOMEM;
 return Ptr;
@@ -30,14 +30,14 @@ INLINE void *setErrnoOnNull(void *Ptr) {
 // Checks aligned_alloc() parameters, verifies that the alignment is a power of
 // two and that the size is a multiple of alignment.
-INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+inline bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
 return Alignment == 0 || !isPowerOfTwo(Alignment) ||
 !isAligned(Size, Alignment);
 }
 // Checks posix_memalign() parameters, verifies that alignment is a power of two
 // and a multiple of sizeof(void *).
-INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+inline bool checkPosixMemalignAlignment(uptr Alignment) {
 return Alignment == 0 || !isPowerOfTwo(Alignment) ||
 !isAligned(Alignment, sizeof(void *));
 }
@@ -45,7 +45,7 @@ INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
 // Returns true if calloc(Size, N) overflows on Size*N calculation. Use a
 // builtin supported by recent clang & GCC if it exists, otherwise fallback to a
 // costly division.
-INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
 #if __has_builtin(__builtin_umull_overflow)
 return __builtin_umull_overflow(Size, N, Product);
 #else
@@ -58,7 +58,7 @@ INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
 // Returns true if the size passed to pvalloc overflows when rounded to the next
 // multiple of PageSize.
-INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
 return roundUpTo(Size, PageSize) < Size;
 }
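
The #else branch of checkForCallocOverflow(), the "costly division" fallback mentioned in its comment, is not shown above. A typical implementation of such a fallback looks like this (a sketch with a made-up name, not necessarily scudo's exact code):

  // Division-based check: Size * N overflowed iff dividing the wrapped product
  // by Size does not give N back; Size == 0 can never overflow.
  inline bool checkForCallocOverflowFallback(uptr Size, uptr N, uptr *Product) {
    *Product = Size * N;
    if (Size == 0)
      return false;
    return (*Product / Size) != N;
  }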