Revert "[scudo] Use require_constant_initialization"

This reverts commit 7ad4dee3e7.
Vitaly Buka 2021-04-29 09:55:28 -07:00
parent 1089158c5a
commit ea7618684c
16 changed files with 58 additions and 69 deletions
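
For context: require_constant_initialization is the Clang attribute that makes it a compile-time error for an annotated global to need a runtime constructor. The reverted commit had added default member initializers (= 0, = {}, = nullptr) across the allocator so its globals would pass that check; this revert drops the attribute and the initializers together, returning to zero-initialization of static storage plus scudo's explicit init paths. A minimal sketch of what the attribute enforces (illustrative types and macro name, not scudo's code):

#if defined(__has_attribute)
#if __has_attribute(require_constant_initialization)
#define REQUIRE_CONST_INIT __attribute__((__require_constant_initialization__))
#endif
#endif
#ifndef REQUIRE_CONST_INIT
#define REQUIRE_CONST_INIT // compilers without the attribute: no-op
#endif

struct Counters {
  unsigned Hits = 0;         // constant expressions keep the implicit
  unsigned *Slots = nullptr; // default constructor constexpr-eligible
};

REQUIRE_CONST_INIT static Counters GCounters; // OK: constant-initialized

// struct Lazy { Lazy(); };              // non-constexpr constructor...
// REQUIRE_CONST_INIT static Lazy GLazy; // ...error: dynamic initialization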

compiler-rt/lib/scudo/standalone/combined.h

@@ -944,8 +944,8 @@ private:
   static const sptr MemTagAllocationTraceIndex = -2;
   static const sptr MemTagAllocationTidIndex = -1;

-  u32 Cookie = 0;
-  u32 QuarantineMaxChunkSize = 0;
+  u32 Cookie;
+  u32 QuarantineMaxChunkSize;

   GlobalStats Stats;
   PrimaryT Primary;
@@ -977,7 +977,7 @@ private:
 #endif
     Entry Entries[NumEntries];
   };
-  AllocationRingBuffer RingBuffer = {};
+  AllocationRingBuffer RingBuffer;

   // The following might get optimized out by the compiler.
   NOINLINE void performSanityChecks() {

compiler-rt/lib/scudo/standalone/internal_defs.h

@@ -48,15 +48,6 @@
 #define USED __attribute__((used))
 #define NOEXCEPT noexcept
-
-#if defined(__has_attribute)
-#if __has_attribute(require_constant_initialization)
-#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION \
-  __attribute__((__require_constant_initialization__))
-#else
-#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-#endif
-#endif

 namespace scudo {

 typedef unsigned long uptr;
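
Since C++20 the same guarantee is spelled constinit in the language itself, which is presumably why a guarded macro was used here rather than a keyword. A two-line illustration (not part of this codebase):

int runtimeValue();

constinit int GCounter = 0;             // OK: initializer is a constant expression
// constinit int GBad = runtimeValue(); // ill-formed: dynamic initialization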

compiler-rt/lib/scudo/standalone/list.h

@@ -57,9 +57,9 @@ template <class T> struct IntrusiveList {
   void checkConsistency() const;

 protected:
-  uptr Size = 0;
-  T *First = nullptr;
-  T *Last = nullptr;
+  uptr Size;
+  T *First;
+  T *Last;
 };

 template <class T> void IntrusiveList<T>::checkConsistency() const {
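
Dropping = nullptr here is only safe because scudo's lists live inside globals: objects with static storage duration are zero-initialized before anything runs, so Size is 0 and the pointers are null anyway. An automatic-storage instance would start with indeterminate members. A standalone sketch of that distinction (hypothetical List type):

struct List {
  unsigned long Size; // no default member initializers, as in the revert
  void *First;
  void *Last;
};

static List GlobalList; // static storage: zero-initialized, Size == 0

void example() {
  List LocalList;  // automatic storage: members are indeterminate
  (void)LocalList; // reading LocalList.Size here would be UB
}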

compiler-rt/lib/scudo/standalone/local_cache.h

@@ -138,9 +138,9 @@ private:
     uptr ClassSize;
     CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
   };
-  PerClass PerClassArray[NumClasses] = {};
+  PerClass PerClassArray[NumClasses];
   LocalStats Stats;
-  SizeClassAllocator *Allocator = nullptr;
+  SizeClassAllocator *Allocator;

   ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
     if (LIKELY(C->MaxCount))

compiler-rt/lib/scudo/standalone/mutex.h

@@ -48,9 +48,9 @@ private:
   static constexpr u8 NumberOfYields = 8U;

 #if SCUDO_LINUX
-  atomic_u32 M = {};
+  atomic_u32 M;
 #elif SCUDO_FUCHSIA
-  sync_mutex_t M = {};
+  sync_mutex_t M;
 #endif

   void lockSlow();

compiler-rt/lib/scudo/standalone/options.h

@@ -44,8 +44,9 @@ template <typename Config> bool useMemoryTagging(Options Options) {
 }

 struct AtomicOptions {
-  atomic_u32 Val = {};
+  atomic_u32 Val;

+public:
   Options load() const { return Options{atomic_load_relaxed(&Val)}; }

   void clear(OptionBit Opt) {
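
For readers unfamiliar with the pattern, AtomicOptions packs runtime option flags into a single atomic word so hot paths can read them without locking. A self-contained model of that idea using std::atomic in place of scudo's atomic_u32 (illustrative names, not the exact API):

#include <atomic>

enum class OptionBit : unsigned { MayReturnNull = 0, ZeroContents = 1 };

struct AtomicOptionFlags {
  std::atomic<unsigned> Val; // zero-initialized in static storage

  unsigned load() const { return Val.load(std::memory_order_relaxed); }
  void set(OptionBit O) {
    Val.fetch_or(1u << static_cast<unsigned>(O), std::memory_order_relaxed);
  }
  void clear(OptionBit O) {
    Val.fetch_and(~(1u << static_cast<unsigned>(O)), std::memory_order_relaxed);
  }
};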

compiler-rt/lib/scudo/standalone/primary32.h

@@ -489,17 +489,17 @@ private:
     return TotalReleasedBytes;
   }

-  SizeClassInfo SizeClassInfoArray[NumClasses] = {};
+  SizeClassInfo SizeClassInfoArray[NumClasses];

   // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
-  ByteMap PossibleRegions = {};
-  atomic_s32 ReleaseToOsIntervalMs = {};
+  ByteMap PossibleRegions;
+  atomic_s32 ReleaseToOsIntervalMs;
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
   HybridMutex RegionsStashMutex;
-  uptr NumberOfStashedRegions = 0;
-  uptr RegionsStash[MaxStashedRegions] = {};
+  uptr NumberOfStashedRegions;
+  uptr RegionsStash[MaxStashedRegions];
 };

 } // namespace scudo

compiler-rt/lib/scudo/standalone/primary64.h

@@ -285,24 +285,24 @@ private:
   struct UnpaddedRegionInfo {
     HybridMutex Mutex;
     SinglyLinkedList<TransferBatch> FreeList;
-    uptr RegionBeg = 0;
-    RegionStats Stats = {};
-    u32 RandState = 0;
-    uptr MappedUser = 0;    // Bytes mapped for user memory.
-    uptr AllocatedUser = 0; // Bytes allocated for user memory.
-    MapPlatformData Data = {};
-    ReleaseToOsInfo ReleaseInfo = {};
-    bool Exhausted = false;
+    uptr RegionBeg;
+    RegionStats Stats;
+    u32 RandState;
+    uptr MappedUser;    // Bytes mapped for user memory.
+    uptr AllocatedUser; // Bytes allocated for user memory.
+    MapPlatformData Data;
+    ReleaseToOsInfo ReleaseInfo;
+    bool Exhausted;
   };
   struct RegionInfo : UnpaddedRegionInfo {
     char Padding[SCUDO_CACHE_LINE_SIZE -
-                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
+                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)];
   };
   static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

-  uptr PrimaryBase = 0;
-  MapPlatformData Data = {};
-  atomic_s32 ReleaseToOsIntervalMs = {};
+  uptr PrimaryBase;
+  MapPlatformData Data;
+  atomic_s32 ReleaseToOsIntervalMs;
   alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];

   RegionInfo *getRegionInfo(uptr ClassId) {
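
The Padding arithmetic above rounds each region's bookkeeping up to a whole number of cache lines so neighboring entries of RegionInfoArray never share a line (false-sharing avoidance between size classes). A standalone sketch of the idiom, assuming a 64-byte line where scudo uses SCUDO_CACHE_LINE_SIZE:

#include <cstddef>

constexpr std::size_t CacheLineSize = 64; // assumed; platform-dependent

struct Unpadded {
  unsigned long Beg;
  unsigned long Mapped;
};

struct Padded : Unpadded {
  // If sizeof(Unpadded) is already a multiple, this adds one full line,
  // exactly as the scudo expression above does.
  char Padding[CacheLineSize - (sizeof(Unpadded) % CacheLineSize)];
};

static_assert(sizeof(Padded) % CacheLineSize == 0, "must fill whole lines");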

compiler-rt/lib/scudo/standalone/quarantine.h

@@ -161,7 +161,7 @@ public:

 private:
   SinglyLinkedList<QuarantineBatch> List;
-  atomic_uptr Size = {};
+  atomic_uptr Size;

   void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
   void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
@@ -246,9 +246,9 @@ private:
   alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
   CacheT Cache;
   alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
-  atomic_uptr MinSize = {};
-  atomic_uptr MaxSize = {};
-  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
+  atomic_uptr MinSize;
+  atomic_uptr MaxSize;
+  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;

   void NOINLINE recycle(uptr MinSize, Callback Cb) {
     CacheT Tmp;

compiler-rt/lib/scudo/standalone/secondary.h

@@ -377,16 +377,16 @@ private:
   }

   HybridMutex Mutex;
-  u32 EntriesCount = 0;
-  u32 QuarantinePos = 0;
-  atomic_u32 MaxEntriesCount = {};
-  atomic_uptr MaxEntrySize = {};
-  u64 OldestTime = 0;
-  u32 IsFullEvents = 0;
-  atomic_s32 ReleaseToOsIntervalMs = {};
+  u32 EntriesCount;
+  u32 QuarantinePos;
+  atomic_u32 MaxEntriesCount;
+  atomic_uptr MaxEntrySize;
+  u64 OldestTime;
+  u32 IsFullEvents;
+  atomic_s32 ReleaseToOsIntervalMs;

-  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
-  CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize] = {};
+  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
+  CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize];
 };

 template <typename Config> class MapAllocator {
@@ -451,11 +451,11 @@ private:

   HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks;
-  uptr AllocatedBytes = 0;
-  uptr FreedBytes = 0;
-  uptr LargestSize = 0;
-  u32 NumberOfAllocs = 0;
-  u32 NumberOfFrees = 0;
+  uptr AllocatedBytes;
+  uptr FreedBytes;
+  uptr LargestSize;
+  u32 NumberOfAllocs;
+  u32 NumberOfFrees;
   LocalStats Stats;
 };

compiler-rt/lib/scudo/standalone/stack_depot.h

@@ -40,7 +40,7 @@ public:

 class StackDepot {
   HybridMutex RingEndMu;
-  u32 RingEnd = 0;
+  u32 RingEnd;

   // This data structure stores a stack trace for each allocation and
   // deallocation when stack trace recording is enabled, that may be looked up
@@ -70,7 +70,7 @@ class StackDepot {
 #endif
   static const uptr TabSize = 1 << TabBits;
   static const uptr TabMask = TabSize - 1;
-  atomic_u32 Tab[TabSize] = {};
+  atomic_u32 Tab[TabSize];

 #ifdef SCUDO_FUZZ
   static const uptr RingBits = 4;
@@ -79,7 +79,7 @@ class StackDepot {
 #endif
   static const uptr RingSize = 1 << RingBits;
   static const uptr RingMask = RingSize - 1;
-  atomic_u64 Ring[RingSize] = {};
+  atomic_u64 Ring[RingSize];

 public:
   // Insert hash of the stack trace [Begin, End) into the stack depot, and

compiler-rt/lib/scudo/standalone/stats.h

@@ -46,11 +46,11 @@ public:

   uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }

-  LocalStats *Next = nullptr;
-  LocalStats *Prev = nullptr;
+  LocalStats *Next;
+  LocalStats *Prev;

 private:
-  atomic_uptr StatsArray[StatCount] = {};
+  atomic_uptr StatsArray[StatCount];
 };

 // Global stats, used for aggregation and querying.
// Global stats, used for aggregation and querying.

compiler-rt/lib/scudo/standalone/tsd.h

@@ -26,7 +26,7 @@ namespace scudo {
 template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   typename Allocator::CacheT Cache;
   typename Allocator::QuarantineCacheT QuarantineCache;
-  u8 DestructorIterations = 0;
+  u8 DestructorIterations;

   void initLinkerInitialized(Allocator *Instance) {
     Instance->initCache(&Cache);
@@ -59,7 +59,7 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {

 private:
   HybridMutex Mutex;
-  atomic_uptr Precedence = {};
+  atomic_uptr Precedence;
 };

 } // namespace scudo

compiler-rt/lib/scudo/standalone/tsd_exclusive.h

@@ -108,9 +108,9 @@ private:
     Instance->callPostInitCallback();
   }

-  pthread_key_t PThreadKey = {};
-  bool Initialized = false;
-  atomic_u8 Disabled = {};
+  pthread_key_t PThreadKey;
+  bool Initialized;
+  atomic_u8 Disabled;
   TSD<Allocator> FallbackTSD;
   HybridMutex Mutex;
   static thread_local ThreadState State;

compiler-rt/lib/scudo/standalone/wrappers_c.cpp

@@ -26,7 +26,6 @@ extern "C" void SCUDO_PREFIX(malloc_postinit)();
 // Export the static allocator so that the C++ wrappers can access it.
 // Technically we could have a completely separated heap for C & C++ but in
 // reality the amount of cross pollination between the two is staggering.
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
 scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;

 #include "wrappers_c.inc"
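
With the attribute line gone, nothing verifies at compile time that SCUDO_ALLOCATOR avoids a static constructor; the allocator is back to relying on the loader zero-initializing it and on first-use initialization at its entry points. A simplified model of that pattern, with std::call_once standing in for scudo's own lazy-init machinery (illustrative, not the actual implementation):

#include <mutex>

class LazyAllocator {
  std::once_flag InitOnce; // constant-initialized; no constructor emitted

public:
  void *allocate(unsigned long Size) {
    std::call_once(InitOnce, [this] { init(); }); // one-time, thread-safe
    (void)Size;
    return nullptr; // real allocation elided in this sketch
  }

private:
  void init() { /* cookie, primary/secondary setup, ... */ }
};

static LazyAllocator TheAllocator; // zero-initialized; no runtime ctor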

compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp

@@ -23,7 +23,6 @@
 #define SCUDO_ALLOCATOR Allocator

 extern "C" void SCUDO_PREFIX(malloc_postinit)();
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
 static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
     SCUDO_ALLOCATOR;

@@ -37,7 +36,6 @@ static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
 #define SCUDO_ALLOCATOR SvelteAllocator

 extern "C" void SCUDO_PREFIX(malloc_postinit)();
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
 static scudo::Allocator<scudo::AndroidSvelteConfig,
                         SCUDO_PREFIX(malloc_postinit)>
     SCUDO_ALLOCATOR;