Fix flaky ArenaBlock wipe test (#9960)

Junhyun Shim 2023-04-14 20:40:56 +02:00 committed by GitHub
parent 2959d07797
commit eb0bc70ea8
3 changed files with 125 additions and 57 deletions

flow/Arena.cpp

@@ -21,6 +21,7 @@
#include "flow/Arena.h"
#include "flow/UnitTest.h"
#include "flow/ScopeExit.h"
#include "flow/config.h"
@@ -343,6 +344,22 @@ void* ArenaBlock::allocate(Reference<ArenaBlock>& self, int bytes, IsSecureMem i
return result;
}
// Allocate a "big" (non-FastAllocator) ArenaBlock. When a keepalive_allocator scope is active
// (unit tests only), route through it so the memory stays mapped after it is released.
force_inline ArenaBlock* newBigArenaBlock(int size) {
if (keepalive_allocator::isActive()) [[unlikely]] {
return (ArenaBlock*)keepalive_allocator::allocate(size);
} else {
return (ArenaBlock*)new uint8_t[size];
}
}
// Free a block obtained from newBigArenaBlock(), honoring an active keepalive_allocator scope.
force_inline void deleteBigArenaBlock(ArenaBlock* block) {
if (keepalive_allocator::isActive()) [[unlikely]] {
return keepalive_allocator::invalidate(block);
} else {
return delete[] reinterpret_cast<uint8_t*>(block);
}
}
// Return an appropriately-sized ArenaBlock to store the given data
ArenaBlock* ArenaBlock::create(int dataSize, Reference<ArenaBlock>& next) {
ArenaBlock* b;
@@ -385,23 +402,23 @@ ArenaBlock* ArenaBlock::create(int dataSize, Reference<ArenaBlock>& next) {
b->bigSize = 256;
INSTRUMENT_ALLOCATE("Arena256");
} else if (reqSize <= 512) {
b = (ArenaBlock*)new uint8_t[512];
b = newBigArenaBlock(512);
b->bigSize = 512;
INSTRUMENT_ALLOCATE("Arena512");
} else if (reqSize <= 1024) {
b = (ArenaBlock*)new uint8_t[1024];
b = newBigArenaBlock(1024);
b->bigSize = 1024;
INSTRUMENT_ALLOCATE("Arena1024");
} else if (reqSize <= 2048) {
b = (ArenaBlock*)new uint8_t[2048];
b = newBigArenaBlock(2048);
b->bigSize = 2048;
INSTRUMENT_ALLOCATE("Arena2048");
} else if (reqSize <= 4096) {
b = (ArenaBlock*)new uint8_t[4096];
b = newBigArenaBlock(4096);
b->bigSize = 4096;
INSTRUMENT_ALLOCATE("Arena4096");
} else {
b = (ArenaBlock*)new uint8_t[8192];
b = newBigArenaBlock(8192);
b->bigSize = 8192;
INSTRUMENT_ALLOCATE("Arena8192");
}
@@ -413,7 +430,7 @@ ArenaBlock* ArenaBlock::create(int dataSize, Reference<ArenaBlock>& next) {
#ifdef ALLOC_INSTRUMENTATION
allocInstr["ArenaHugeKB"].alloc((reqSize + 1023) >> 10);
#endif
b = (ArenaBlock*)new uint8_t[reqSize];
b = newBigArenaBlock(reqSize);
b->tinySize = b->tinyUsed = NOT_TINY;
b->bigSize = reqSize;
b->totalSizeEstimate = b->bigSize;
@@ -505,26 +522,26 @@ void ArenaBlock::destroyLeaf() {
FastAllocator<256>::release(this);
INSTRUMENT_RELEASE("Arena256");
} else if (bigSize <= 512) {
delete[] reinterpret_cast<uint8_t*>(this);
deleteBigArenaBlock(this);
INSTRUMENT_RELEASE("Arena512");
} else if (bigSize <= 1024) {
delete[] reinterpret_cast<uint8_t*>(this);
deleteBigArenaBlock(this);
INSTRUMENT_RELEASE("Arena1024");
} else if (bigSize <= 2048) {
delete[] reinterpret_cast<uint8_t*>(this);
deleteBigArenaBlock(this);
INSTRUMENT_RELEASE("Arena2048");
} else if (bigSize <= 4096) {
delete[] reinterpret_cast<uint8_t*>(this);
deleteBigArenaBlock(this);
INSTRUMENT_RELEASE("Arena4096");
} else if (bigSize <= 8192) {
delete[] reinterpret_cast<uint8_t*>(this);
deleteBigArenaBlock(this);
INSTRUMENT_RELEASE("Arena8192");
} else {
#ifdef ALLOC_INSTRUMENTATION
allocInstr["ArenaHugeKB"].dealloc((bigSize + 1023) >> 10);
#endif
g_hugeArenaMemory.fetch_sub(bigSize);
delete[] reinterpret_cast<uint8_t*>(this);
deleteBigArenaBlock(this);
}
}
}
@@ -996,15 +1013,7 @@ TEST_CASE("/flow/Arena/OptionalMap") {
return Void();
}
// TODO: remove the following `#if 0 ... #endif` once we come up with a way of reliably, temporarily swapping
// Arena alloc/free implementation for the duration of a test run.
// See https://github.com/apple/foundationdb/pull/9865/files#r1155735635.
#if 0
TEST_CASE("/flow/Arena/Secure") {
#if !defined(USE_SANITIZER) && !defined(VALGRIND)
// Note: Assumptions underlying this unit test are speculative.
// Disable for a build configuration or entirely if deemed flaky.
// As of writing, below equivalency of (buf == newBuf) holds except for ASAN builds.
auto& rng = *deterministicRandom();
auto sizes = std::vector<int>{ 1 };
for (auto i = 2; i <= ArenaBlock::LARGE * 2; i *= 2) {
@@ -1012,49 +1021,37 @@ TEST_CASE("/flow/Arena/Secure") {
// randomly select one value between this pow2 and the next
sizes.push_back(rng.randomInt(sizes.back() + 1, sizes.back() * 2));
}
auto totalIters = 0;
auto samePtrCount = 0;
// temporarily disable allocation tracing: runs that hit hugeArenaSample seem to cause the test to fail for some reason
g_allocation_tracing_disabled++;
auto reenableAllocTracingOnUnwind = ScopeExit([]() { g_allocation_tracing_disabled--; });
for (auto iter = 0; iter < 100; iter++) {
for (auto len : sizes) {
uint8_t* buf = nullptr;
{
Arena arena;
buf = new (arena, WipeAfterUse{}) uint8_t[len];
auto const allocsPerArena = rng.randomInt(1, 80);
std::vector<std::pair<uint8_t*, int>> buffers;
// the scope object below keeps deallocated memory alive so we can verify that wipe-after-use behaves correctly
auto keepaliveScope = keepalive_allocator::ActiveScope{};
{
Arena arena;
for (auto i = 0; i < allocsPerArena; i++) {
auto const len = sizes[rng.randomInt(0, sizes.size())];
auto const buf = new (arena, WipeAfterUse{}) uint8_t[len];
for (auto i = 0; i < len; i++)
buf[i] = rng.randomInt(1, 256);
buffers.push_back(std::make_pair(buf, len));
}
{
Arena arena;
uint8_t* newBuf = nullptr;
if (rng.coinflip()) {
newBuf = new (arena, WipeAfterUse{}) uint8_t[len];
} else {
newBuf = new (arena) uint8_t[len];
}
ASSERT(newBuf == buf);
// there's no hard guarantee about the above equality and the result could vary by platform,
// malloc implementation, and tooling instrumentation (e.g. ASAN, valgrind)
// but it is practically likely because of
// a) how Arena uses (and malloc variants tend to use) thread-local freelists, and
// b) the fact that we earlier allocated the memory blocks in some size sequence,
// freed them in reverse order, and then allocated them again immediately in the same size
// sequence.
// in the same vein, it is speculative but likely that if buf == newBuf,
// the memory backing the address is the same and remained untouched,
// because FDB servers are single-threaded
samePtrCount++;
for (auto i = 0; i < len; i++) {
if (newBuf[i] != 0) {
fmt::print("Non-zero byte found at iter {} size {} offset {}\n", iter + 1, len, i);
ASSERT(false);
}
}
// make sure the buffers have been zeroed out
for (auto const& buffer : buffers) {
auto buf = buffer.first;
auto len = buffer.second;
makeDefined(buf, len);
auto poisonOnUnwind = ScopeExit([buf, len]() { makeNoAccess(buf, len); });
for (auto i = 0; i < len; i++) {
if (buf[i] != 0) {
fmt::print("Non-zero byte found at iter {} size {} offset {}\n", iter + 1, len, i);
ASSERT(false);
}
}
totalIters++;
}
}
fmt::print("Total iterations: {}, # of times check passed: {}\n", totalIters, samePtrCount);
#endif // !defined(USE_SANITIZER) && !defined(VALGRIND)
return Void();
}
#endif // 0
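
For context, here is the pattern the rewritten test relies on, reduced to a single allocation. This is an illustrative sketch, not part of the commit; the function name is made up, and it assumes the code is built inside flow with the same headers and the WipeAfterUse placement-new overload used by the test above.

#include "flow/Arena.h"
#include "flow/FastAlloc.h"

void wipeCheckSketch() {
	// Keep freed ArenaBlocks mapped for the duration of this scope so their contents can be inspected.
	auto keepaliveScope = keepalive_allocator::ActiveScope{};
	int const len = 64;
	uint8_t* buf = nullptr;
	{
		Arena arena;
		buf = new (arena, WipeAfterUse{}) uint8_t[len]; // request wipe-on-destruction
		for (int i = 0; i < len; i++)
			buf[i] = 0xAB;
	} // the arena and its blocks are destroyed here; WipeAfterUse memory should now be zeroed
	makeDefined(buf, len); // un-poison before reading logically-freed memory (ASAN/valgrind builds)
	for (int i = 0; i < len; i++)
		ASSERT(buf[i] == 0);
	makeNoAccess(buf, len);
}

Keeping the freed blocks mapped is what removes the flakiness: the old test could only allocate from a fresh Arena and hope it received the exact same address (ASSERT(newBuf == buf)), which, as its own comments note, held only by accident of freelist behavior and broke under ASAN.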

flow/FastAlloc.cpp

@@ -304,8 +304,54 @@ static int64_t getSizeCode(int i) {
}
#endif
namespace keepalive_allocator {
namespace detail {
std::set<void*> g_allocatedSet;
std::set<void*> g_freedSet;
bool g_active = false;
} // namespace detail
ActiveScope::ActiveScope() {
// no nested scopes allowed
ASSERT(!detail::g_active);
ASSERT(detail::g_allocatedSet.empty());
ASSERT(detail::g_freedSet.empty());
detail::g_active = true;
}
// Deferred frees happen here, after checking that every tracked allocation was invalidated exactly once.
ActiveScope::~ActiveScope() {
ASSERT_ABORT(detail::g_active);
ASSERT_ABORT(detail::g_allocatedSet == detail::g_freedSet);
for (auto memory : detail::g_allocatedSet) {
delete[] reinterpret_cast<uint8_t*>(memory);
}
detail::g_allocatedSet.clear();
detail::g_freedSet.clear();
detail::g_active = false;
}
void* allocate(size_t size) {
auto ptr = new uint8_t[size];
auto [_, inserted] = detail::g_allocatedSet.insert(ptr);
ASSERT(inserted); // no duplicates
return ptr;
}
// Record the free, but keep the memory mapped (and its contents intact) until the ActiveScope ends.
void invalidate(void* ptr) {
ASSERT(detail::g_allocatedSet.contains(ptr));
ASSERT(!detail::g_freedSet.contains(ptr));
detail::g_freedSet.insert(ptr);
}
} // namespace keepalive_allocator
template <int Size>
void* FastAllocator<Size>::allocate() {
if (keepalive_allocator::isActive()) [[unlikely]]
return keepalive_allocator::allocate(Size);
#if defined(USE_GPERFTOOLS) || defined(ADDRESS_SANITIZER)
// Some usages of FastAllocator require 4096 byte alignment.
@@ -359,6 +405,8 @@ void* FastAllocator<Size>::allocate() {
template <int Size>
void FastAllocator<Size>::release(void* ptr) {
if (keepalive_allocator::isActive()) [[unlikely]]
return keepalive_allocator::invalidate(ptr);
#if defined(USE_GPERFTOOLS) || defined(ADDRESS_SANITIZER)
return aligned_free(ptr);
@@ -620,4 +668,4 @@ TEST_CASE("/jemalloc/4k_aligned_usable_size") {
}
return Void();
}
#endif
#endif
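
To make the contract of the allocator shim above concrete, here is a minimal usage sketch (illustrative only, not part of the commit; the function name is made up) using just the functions declared in the keepalive_allocator namespace.

#include "flow/FastAlloc.h"

void keepaliveContractSketch() {
	auto scope = keepalive_allocator::ActiveScope{}; // nesting is not allowed (asserted in the constructor)
	// While the scope is active, FastAllocator<Size>::allocate()/release() and the big-ArenaBlock
	// helpers in Arena.cpp route through these two functions instead of touching the real allocators.
	void* p = keepalive_allocator::allocate(64); // backed by plain new[]; remembered in g_allocatedSet
	keepalive_allocator::invalidate(p);          // logically freed: recorded in g_freedSet, memory stays mapped
	// *p may still be inspected here, which is what lets the Arena test read wiped buffers after free.
	// When `scope` is destroyed it asserts that g_allocatedSet == g_freedSet (every allocation was
	// invalidated exactly once) and only then performs the real delete[] on each tracked pointer.
}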

flow/include/flow/FastAlloc.h

@@ -177,6 +177,29 @@ void hugeArenaSample(int size);
void releaseAllThreadMagazines();
int64_t getTotalUnusedAllocatedMemory();
// Allows temporarily overriding the default allocators used by Arena so that memory survives deallocation,
// in order to test the correctness of memory policies (e.g. zeroing out sensitive contents after use).
namespace keepalive_allocator {
namespace detail {
extern bool g_active;
} // namespace detail
inline bool isActive() noexcept {
return detail::g_active;
}
// RAII guard: while an instance is alive, Arena/FastAllocator allocations routed through this namespace
// are kept mapped after being freed so tests can inspect them; the real frees happen when the scope ends.
class ActiveScope {
public:
ActiveScope();
~ActiveScope();
};
void* allocate(size_t);
void invalidate(void*);
} // namespace keepalive_allocator
inline constexpr int nextFastAllocatedSize(int x) {
assert(x > 0 && x <= 16384);
if (x <= 16)