Merge branch 'main' of github.com:apple/foundationdb into getsizetenant

This commit is contained in:
Ankita Kejriwal 2022-10-11 15:03:42 -07:00
commit 7abe785669
19 changed files with 218 additions and 134 deletions

View File

@ -875,6 +875,16 @@ int workerProcessMain(Arguments const& args, int worker_id, shared_memory::Acces
}
}
if (args.disable_client_bypass) {
err = network::setOptionNothrow(FDB_NET_OPTION_DISABLE_CLIENT_BYPASS);
if (err) {
logr.error("network::setOption (FDB_NET_OPTION_DISABLE_CLIENT_BYPASS): {}",
args.disable_client_bypass,
err.what());
return -1;
}
}
/* Network thread must be setup before doing anything */
logr.debug("network::setup()");
network::setup();
@ -1005,6 +1015,7 @@ int initArguments(Arguments& args) {
args.txnspec.ops[i][OP_COUNT] = 0;
}
args.client_threads_per_version = 0;
args.disable_client_bypass = false;
args.disable_ryw = 0;
args.json_output_path[0] = '\0';
args.stats_export_path[0] = '\0';
@ -1248,6 +1259,7 @@ int parseArguments(int argc, char* argv[], Arguments& args) {
{ "txntagging_prefix", required_argument, NULL, ARG_TXNTAGGINGPREFIX },
{ "version", no_argument, NULL, ARG_VERSION },
{ "client_threads_per_version", required_argument, NULL, ARG_CLIENT_THREADS_PER_VERSION },
{ "disable_client_bypass", no_argument, NULL, ARG_DISABLE_CLIENT_BYPASS },
{ "disable_ryw", no_argument, NULL, ARG_DISABLE_RYW },
{ "json_report", optional_argument, NULL, ARG_JSON_REPORT },
{ "bg_file_path", required_argument, NULL, ARG_BG_FILE_PATH },
@ -1446,6 +1458,9 @@ int parseArguments(int argc, char* argv[], Arguments& args) {
case ARG_CLIENT_THREADS_PER_VERSION:
args.client_threads_per_version = atoi(optarg);
break;
case ARG_DISABLE_CLIENT_BYPASS:
args.disable_client_bypass = true;
break;
case ARG_DISABLE_RYW:
args.disable_ryw = 1;
break;

View File

@ -75,6 +75,7 @@ enum ArgKind {
ARG_STREAMING_MODE,
ARG_DISABLE_RYW,
ARG_CLIENT_THREADS_PER_VERSION,
ARG_DISABLE_CLIENT_BYPASS,
ARG_JSON_REPORT,
ARG_BG_FILE_PATH, // if blob granule files are stored locally, mako will read and materialize them if this is set
ARG_EXPORT_PATH,
@ -169,6 +170,7 @@ struct Arguments {
char txntagging_prefix[TAGPREFIXLENGTH_MAX];
FDBStreamingMode streaming_mode;
int64_t client_threads_per_version;
bool disable_client_bypass;
int disable_ryw;
char json_output_path[PATH_MAX];
bool bg_materialize_files;

View File

@ -291,6 +291,19 @@ else()
# for more information.
#add_compile_options(-fno-builtin-memcpy)
if (USE_LIBCXX)
# Make sure that libc++ can be found by the platform's loader, so that things like cmake's "try_run" work.
find_library(LIBCXX_SO_PATH c++ /usr/local/lib)
if (LIBCXX_SO_PATH)
get_filename_component(LIBCXX_SO_DIR ${LIBCXX_SO_PATH} DIRECTORY)
if (APPLE)
set(ENV{DYLD_LIBRARY_PATH} "$ENV{DYLD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
else()
set(ENV{LD_LIBRARY_PATH} "$ENV{LD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
endif()
endif()
endif()
if (CLANG OR ICX)
if (APPLE OR USE_LIBCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
@ -298,19 +311,6 @@ else()
if (STATIC_LINK_LIBCXX)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic")
else()
# Make sure that libc++ can be found by the platform's loader, so that things like cmake's "try_run" work.
find_library(LIBCXX_SO_PATH c++ /usr/local/lib)
if (LIBCXX_SO_PATH)
get_filename_component(LIBCXX_SO_DIR ${LIBCXX_SO_PATH} DIRECTORY)
if (APPLE)
set(ENV{DYLD_LIBRARY_PATH} "$ENV{DYLD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
elseif(WIN32)
set(ENV{PATH} "$ENV{PATH};${LIBCXX_SO_DIR}")
else()
set(ENV{LD_LIBRARY_PATH} "$ENV{LD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
endif()
endif()
endif()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -Wl,-build-id=sha1")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -stdlib=libc++ -Wl,-build-id=sha1")

View File

@ -442,7 +442,7 @@ void printStatus(StatusObjectReader statusObj,
outputString += "\n Blob granules - enabled";
}
outputString += "\n Encryption at-rest - ";
outputString += "\n Encryption at-rest - ";
if (statusObjConfig.get("encryption_at_rest_mode", strVal)) {
outputString += strVal;
} else {

View File

@ -25,6 +25,8 @@
#include "flow/FileIdentifier.h"
using BlobMetadataDomainId = int64_t;
using BlobMetadataDomainNameRef = StringRef;
using BlobMetadataDomainName = Standalone<BlobMetadataDomainNameRef>;
/*
* There are 3 cases for blob metadata.
@ -38,26 +40,31 @@ using BlobMetadataDomainId = int64_t;
struct BlobMetadataDetailsRef {
constexpr static FileIdentifier file_identifier = 6685526;
BlobMetadataDomainId domainId;
BlobMetadataDomainNameRef domainName;
Optional<StringRef> base;
VectorRef<StringRef> partitions;
BlobMetadataDetailsRef() {}
BlobMetadataDetailsRef(Arena& arena, const BlobMetadataDetailsRef& from)
: domainId(from.domainId), partitions(arena, from.partitions) {
: domainId(from.domainId), domainName(arena, from.domainName), partitions(arena, from.partitions) {
if (from.base.present()) {
base = StringRef(arena, from.base.get());
}
}
explicit BlobMetadataDetailsRef(BlobMetadataDomainId domainId,
BlobMetadataDomainNameRef domainName,
Optional<StringRef> base,
VectorRef<StringRef> partitions)
: domainId(domainId), base(base), partitions(partitions) {}
: domainId(domainId), domainName(domainName), base(base), partitions(partitions) {}
int expectedSize() const { return sizeof(BlobMetadataDetailsRef) + partitions.expectedSize(); }
int expectedSize() const {
return sizeof(BlobMetadataDetailsRef) + domainName.size() + (base.present() ? base.get().size() : 0) +
partitions.expectedSize();
}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, domainId, base, partitions);
serializer(ar, domainId, domainName, base, partitions);
}
};

View File

@ -197,6 +197,7 @@ struct EKPGetLatestBaseCipherKeysReply {
}
};
// TODO: also used for blob metadata, fix name
struct EKPGetLatestCipherKeysRequestInfo {
constexpr static FileIdentifier file_identifier = 2180516;
// Encryption domain identifier
@ -206,7 +207,7 @@ struct EKPGetLatestCipherKeysRequestInfo {
EncryptCipherDomainNameRef domainName;
EKPGetLatestCipherKeysRequestInfo() : domainId(INVALID_ENCRYPT_DOMAIN_ID) {}
EKPGetLatestCipherKeysRequestInfo(const EncryptCipherDomainId dId, StringRef name, Arena& arena)
explicit EKPGetLatestCipherKeysRequestInfo(Arena& arena, const EncryptCipherDomainId dId, StringRef name)
: domainId(dId), domainName(StringRef(arena, name)) {}
bool operator==(const EKPGetLatestCipherKeysRequestInfo& info) const {
@ -261,16 +262,15 @@ struct EKPGetLatestBlobMetadataReply {
// Request for the latest blob metadata of a set of domains. Each domain is
// identified by (domainId, domainName) via EKPGetLatestCipherKeysRequestInfo
// (see TODO above about renaming that struct).
struct EKPGetLatestBlobMetadataRequest {
	constexpr static FileIdentifier file_identifier = 3821549;
	Standalone<VectorRef<EKPGetLatestCipherKeysRequestInfo>> domainInfos;
	// Optional id used to correlate debug trace events across processes.
	Optional<UID> debugId;
	ReplyPromise<EKPGetLatestBlobMetadataReply> reply;

	EKPGetLatestBlobMetadataRequest() {}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, domainInfos, debugId, reply);
	}
};

View File

@ -108,7 +108,7 @@ Future<std::unordered_map<EncryptCipherDomainId, Reference<BlobCipherKey>>> getL
cipherKeys[domain.first] = cachedCipherKey;
} else {
request.encryptDomainInfos.emplace_back(
domain.first /*domainId*/, domain.second /*domainName*/, request.arena);
request.arena, domain.first /*domainId*/, domain.second /*domainName*/);
}
}
if (request.encryptDomainInfos.empty()) {

View File

@ -43,16 +43,42 @@ struct ProcessClass {
DataDistributorClass,
CoordinatorClass,
RatekeeperClass,
ConsistencyScanClass,
StorageCacheClass,
BackupClass,
GrvProxyClass,
BlobManagerClass,
BlobWorkerClass,
EncryptKeyProxyClass,
ConsistencyScanClass,
InvalidClass = -1
};
// class is serialized by enum value, so it's important not to change the
// enum value of a class. New classes should only be added to the end.
static_assert(ProcessClass::UnsetClass == 0);
static_assert(ProcessClass::StorageClass == 1);
static_assert(ProcessClass::TransactionClass == 2);
static_assert(ProcessClass::ResolutionClass == 3);
static_assert(ProcessClass::TesterClass == 4);
static_assert(ProcessClass::CommitProxyClass == 5);
static_assert(ProcessClass::MasterClass == 6);
static_assert(ProcessClass::StatelessClass == 7);
static_assert(ProcessClass::LogClass == 8);
static_assert(ProcessClass::ClusterControllerClass == 9);
static_assert(ProcessClass::LogRouterClass == 10);
static_assert(ProcessClass::FastRestoreClass == 11);
static_assert(ProcessClass::DataDistributorClass == 12);
static_assert(ProcessClass::CoordinatorClass == 13);
static_assert(ProcessClass::RatekeeperClass == 14);
static_assert(ProcessClass::StorageCacheClass == 15);
static_assert(ProcessClass::BackupClass == 16);
static_assert(ProcessClass::GrvProxyClass == 17);
static_assert(ProcessClass::BlobManagerClass == 18);
static_assert(ProcessClass::BlobWorkerClass == 19);
static_assert(ProcessClass::EncryptKeyProxyClass == 20);
static_assert(ProcessClass::ConsistencyScanClass == 21);
static_assert(ProcessClass::InvalidClass == -1);
enum Fitness {
BestFit,
GoodFit,
@ -86,6 +112,12 @@ struct ProcessClass {
int16_t _class;
int16_t _source;
// source is serialized by enum value, so it's important not to change the
// enum value of a source. New sources should only be added to the end.
static_assert(ProcessClass::CommandLineSource == 0);
static_assert(ProcessClass::AutoSource == 1);
static_assert(ProcessClass::DBSource == 2);
public:
ProcessClass() : _class(UnsetClass), _source(CommandLineSource) {}
ProcessClass(ClassType type, ClassSource source) : _class(type), _source(source) {}

View File

@ -451,12 +451,14 @@ TEST_CASE("/blobgranule/server/common/granulesummary") {
}
// FIXME: if credentials can expire, refresh periodically
ACTOR Future<Void> loadBlobMetadataForTenants(BGTenantMap* self, std::vector<TenantMapEntry> tenantMapEntries) {
ACTOR Future<Void> loadBlobMetadataForTenants(
BGTenantMap* self,
std::vector<std::pair<BlobMetadataDomainId, BlobMetadataDomainName>> tenantsToLoad) {
ASSERT(SERVER_KNOBS->BG_METADATA_SOURCE == "tenant");
ASSERT(!tenantMapEntries.empty());
state std::vector<BlobMetadataDomainId> domainIds;
for (auto& entry : tenantMapEntries) {
domainIds.push_back(entry.id);
ASSERT(!tenantsToLoad.empty());
state EKPGetLatestBlobMetadataRequest req;
for (auto& tenant : tenantsToLoad) {
req.domainInfos.emplace_back_deep(req.domainInfos.arena(), tenant.first, StringRef(tenant.second));
}
// FIXME: if one tenant gets an error, don't kill whole process
@ -464,8 +466,7 @@ ACTOR Future<Void> loadBlobMetadataForTenants(BGTenantMap* self, std::vector<Ten
loop {
Future<EKPGetLatestBlobMetadataReply> requestFuture;
if (self->dbInfo.isValid() && self->dbInfo->get().encryptKeyProxy.present()) {
EKPGetLatestBlobMetadataRequest req;
req.domainIds = domainIds;
req.reply.reset();
requestFuture =
brokenPromiseToNever(self->dbInfo->get().encryptKeyProxy.get().getLatestBlobMetadata.getReply(req));
} else {
@ -473,7 +474,7 @@ ACTOR Future<Void> loadBlobMetadataForTenants(BGTenantMap* self, std::vector<Ten
}
choose {
when(EKPGetLatestBlobMetadataReply rep = wait(requestFuture)) {
ASSERT(rep.blobMetadataDetails.size() == domainIds.size());
ASSERT(rep.blobMetadataDetails.size() == req.domainInfos.size());
// not guaranteed to be in same order in the request as the response
for (auto& metadata : rep.blobMetadataDetails) {
auto info = self->tenantInfoById.find(metadata.domainId);
@ -493,7 +494,7 @@ ACTOR Future<Void> loadBlobMetadataForTenants(BGTenantMap* self, std::vector<Ten
// list of tenants that may or may not already exist
void BGTenantMap::addTenants(std::vector<std::pair<TenantName, TenantMapEntry>> tenants) {
std::vector<TenantMapEntry> tenantsToLoad;
std::vector<std::pair<BlobMetadataDomainId, BlobMetadataDomainName>> tenantsToLoad;
for (auto entry : tenants) {
if (tenantInfoById.insert({ entry.second.id, entry.second }).second) {
auto r = makeReference<GranuleTenantData>(entry.first, entry.second);
@ -501,7 +502,7 @@ void BGTenantMap::addTenants(std::vector<std::pair<TenantName, TenantMapEntry>>
if (SERVER_KNOBS->BG_METADATA_SOURCE != "tenant") {
r->bstoreLoaded.send(Void());
} else {
tenantsToLoad.push_back(entry.second);
tenantsToLoad.push_back({ entry.second.id, entry.first });
}
}
}

View File

@ -690,44 +690,43 @@ ACTOR Future<Void> getLatestBlobMetadata(Reference<EncryptKeyProxyData> ekpProxy
}
// Dedup the requested domainIds.
std::unordered_set<BlobMetadataDomainId> dedupedDomainIds;
for (auto id : req.domainIds) {
dedupedDomainIds.emplace(id);
std::unordered_map<BlobMetadataDomainId, BlobMetadataDomainName> dedupedDomainInfos;
for (auto info : req.domainInfos) {
dedupedDomainInfos.insert({ info.domainId, info.domainName });
}
if (dbgTrace.present()) {
dbgTrace.get().detail("NKeys", dedupedDomainIds.size());
for (BlobMetadataDomainId id : dedupedDomainIds) {
dbgTrace.get().detail("NKeys", dedupedDomainInfos.size());
for (auto& info : dedupedDomainInfos) {
// log domainids queried
dbgTrace.get().detail("BMQ" + std::to_string(id), "");
dbgTrace.get().detail("BMQ" + std::to_string(info.first), "");
}
}
// First, check if the requested information is already cached by the server.
// Ensure the cached information is within SERVER_KNOBS->BLOB_METADATA_CACHE_TTL time window.
std::vector<BlobMetadataDomainId> lookupDomains;
for (BlobMetadataDomainId id : dedupedDomainIds) {
const auto itr = ekpProxyData->blobMetadataDomainIdCache.find(id);
state KmsConnBlobMetadataReq kmsReq;
kmsReq.debugId = req.debugId;
for (auto& info : dedupedDomainInfos) {
const auto itr = ekpProxyData->blobMetadataDomainIdCache.find(info.first);
if (itr != ekpProxyData->blobMetadataDomainIdCache.end() && itr->second.isValid()) {
metadataDetails.arena().dependsOn(itr->second.metadataDetails.arena());
metadataDetails.push_back(metadataDetails.arena(), itr->second.metadataDetails);
if (dbgTrace.present()) {
dbgTrace.get().detail("BMC" + std::to_string(id), "");
dbgTrace.get().detail("BMC" + std::to_string(info.first), "");
}
++ekpProxyData->blobMetadataCacheHits;
} else {
lookupDomains.emplace_back(id);
++ekpProxyData->blobMetadataCacheMisses;
kmsReq.domainInfos.emplace_back(kmsReq.domainInfos.arena(), info.first, info.second);
}
}
ekpProxyData->baseCipherDomainIdCacheHits += metadataDetails.size();
ekpProxyData->baseCipherDomainIdCacheMisses += lookupDomains.size();
ekpProxyData->blobMetadataCacheHits += metadataDetails.size();
if (!lookupDomains.empty()) {
if (!kmsReq.domainInfos.empty()) {
ekpProxyData->blobMetadataCacheMisses += kmsReq.domainInfos.size();
try {
KmsConnBlobMetadataReq kmsReq(lookupDomains, req.debugId);
state double startTime = now();
KmsConnBlobMetadataRep kmsRep = wait(kmsConnectorInf.blobMetadataReq.getReply(kmsReq));
ekpProxyData->kmsBlobMetadataReqLatency.addMeasurement(now() - startTime);
@ -755,7 +754,6 @@ ACTOR Future<Void> getLatestBlobMetadata(Reference<EncryptKeyProxyData> ekpProxy
}
req.reply.send(EKPGetLatestBlobMetadataReply(metadataDetails));
return Void();
}
@ -771,10 +769,11 @@ ACTOR Future<Void> refreshBlobMetadataCore(Reference<EncryptKeyProxyData> ekpPro
try {
KmsConnBlobMetadataReq req;
req.debugId = debugId;
req.domainIds.reserve(ekpProxyData->blobMetadataDomainIdCache.size());
req.domainInfos.reserve(req.domainInfos.arena(), ekpProxyData->blobMetadataDomainIdCache.size());
// TODO add refresh + expire timestamp and filter to only ones that need refreshing
for (auto& item : ekpProxyData->blobMetadataDomainIdCache) {
req.domainIds.emplace_back(item.first);
req.domainInfos.emplace_back(req.domainInfos.arena(), item.first, item.second.metadataDetails.domainName);
}
state double startTime = now();
KmsConnBlobMetadataRep rep = wait(kmsConnectorInf.blobMetadataReq.getReply(req));

View File

@ -185,10 +185,13 @@ ACTOR Future<Void> ekLookupByDomainIds(Reference<SimKmsConnectorContext> ctx,
return Void();
}
static Standalone<BlobMetadataDetailsRef> createBlobMetadata(BlobMetadataDomainId domainId) {
// TODO: switch this to use bg_url instead of hardcoding file://fdbblob, so it works as FDBPerfKmsConnector
// FIXME: make this (more) deterministic outside of simulation for FDBPerfKmsConnector
static Standalone<BlobMetadataDetailsRef> createBlobMetadata(BlobMetadataDomainId domainId,
BlobMetadataDomainName domainName) {
Standalone<BlobMetadataDetailsRef> metadata;
metadata.domainId = domainId;
metadata.domainName = domainName;
// 0 == no partition, 1 == suffix partitioned, 2 == storage location partitioned
int type = deterministicRandom()->randomInt(0, 3);
int partitionCount = (type == 0) ? 0 : deterministicRandom()->randomInt(2, 12);
@ -234,17 +237,19 @@ ACTOR Future<Void> blobMetadataLookup(KmsConnectorInterface interf, KmsConnBlobM
dbgDIdTrace.get().detail("DbgId", req.debugId.get());
}
for (BlobMetadataDomainId domainId : req.domainIds) {
auto it = simBlobMetadataStore.find(domainId);
for (auto const& domainInfo : req.domainInfos) {
auto it = simBlobMetadataStore.find(domainInfo.domainId);
if (it == simBlobMetadataStore.end()) {
// construct new blob metadata
it = simBlobMetadataStore.insert({ domainId, createBlobMetadata(domainId) }).first;
it = simBlobMetadataStore
.insert({ domainInfo.domainId, createBlobMetadata(domainInfo.domainId, domainInfo.domainName) })
.first;
}
rep.metadataDetails.arena().dependsOn(it->second.arena());
rep.metadataDetails.push_back(rep.metadataDetails.arena(), it->second);
}
wait(delayJittered(1.0)); // simulate network delay
wait(delay(deterministicRandom()->random01())); // simulate network delay
req.reply.send(rep);

View File

@ -232,17 +232,15 @@ struct KmsConnBlobMetadataRep {
// KMS-connector request to look up blob metadata for a set of domains, each
// identified by (domainId, domainName).
struct KmsConnBlobMetadataReq {
	constexpr static FileIdentifier file_identifier = 3913147;
	Standalone<VectorRef<KmsConnLookupDomainIdsReqInfoRef>> domainInfos;
	// Optional id used to correlate debug trace events across processes.
	Optional<UID> debugId;
	ReplyPromise<KmsConnBlobMetadataRep> reply;

	KmsConnBlobMetadataReq() {}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, domainInfos, debugId, reply);
	}
};

View File

@ -71,7 +71,7 @@ struct EncryptKeyProxyTestWorkload : TestWorkload {
for (int i = 0; i < self->numDomains / 2; i++) {
const EncryptCipherDomainId domainId = self->minDomainId + i;
self->domainInfos.emplace_back(
EKPGetLatestCipherKeysRequestInfo(domainId, StringRef(std::to_string(domainId)), self->arena));
EKPGetLatestCipherKeysRequestInfo(self->arena, domainId, StringRef(std::to_string(domainId))));
}
state int nAttempts = 0;
@ -127,14 +127,14 @@ struct EncryptKeyProxyTestWorkload : TestWorkload {
for (int i = 0; i < expectedHits; i++) {
const EncryptCipherDomainId domainId = self->minDomainId + i;
self->domainInfos.emplace_back(
EKPGetLatestCipherKeysRequestInfo(domainId, StringRef(std::to_string(domainId)), self->arena));
EKPGetLatestCipherKeysRequestInfo(self->arena, domainId, StringRef(std::to_string(domainId))));
}
expectedMisses = deterministicRandom()->randomInt(1, self->numDomains / 2);
for (int i = 0; i < expectedMisses; i++) {
const EncryptCipherDomainId domainId = self->minDomainId + i + self->numDomains / 2 + 1;
self->domainInfos.emplace_back(
EKPGetLatestCipherKeysRequestInfo(domainId, StringRef(std::to_string(domainId)), self->arena));
EKPGetLatestCipherKeysRequestInfo(self->arena, domainId, StringRef(std::to_string(domainId))));
}
state int nAttempts = 0;
@ -191,7 +191,7 @@ struct EncryptKeyProxyTestWorkload : TestWorkload {
for (int i = 0; i < self->numDomains; i++) {
const EncryptCipherDomainId domainId = self->minDomainId + i;
self->domainInfos.emplace_back(
EKPGetLatestCipherKeysRequestInfo(domainId, StringRef(std::to_string(domainId)), self->arena));
EKPGetLatestCipherKeysRequestInfo(self->arena, domainId, StringRef(std::to_string(domainId))));
}
EKPGetLatestBaseCipherKeysRequest req;

View File

@ -278,8 +278,8 @@ if(WITH_PYTHON)
TEST_FILES restarting/from_7.0.0/UpgradeAndBackupRestore-1.toml
restarting/from_7.0.0/UpgradeAndBackupRestore-2.toml)
add_fdb_test(
TEST_FILES restarting/to_7.1.0/CycleTestRestart-1.txt
restarting/to_7.1.0/CycleTestRestart-2.txt)
TEST_FILES restarting/to_7.1.0/CycleTestRestart-1.toml
restarting/to_7.1.0/CycleTestRestart-2.toml)
add_fdb_test(
TEST_FILES restarting/from_7.1.0/SnapTestAttrition-1.txt
restarting/from_7.1.0/SnapTestAttrition-2.txt)

View File

@ -5,6 +5,10 @@ disableHostname=true
disableEncryption=true
storageEngineExcludeTypes=[3,4]
[[knobs]]
# This can be removed once the lower bound of this downgrade test is a version that understands the new protocol
shard_encode_location_metadata = false
[[test]]
testTitle = 'CloggedConfigureDatabaseTest'
clearAfterTest = false

View File

@ -0,0 +1,49 @@
[configuration]
storageEngineExcludeTypes = [3]
maxTLogVersion = 6
disableTss = true
disableHostname = true
disableEncryption = true
[[knobs]]
# This can be removed once the lower bound of this downgrade test is a version that understands the new protocol
shard_encode_location_metadata = false
[[test]]
testTitle = 'Clogged'
clearAfterTest = false
[[test.workload]]
testName = 'Cycle'
transactionsPerSecond = 500.0
nodeCount = 2500
testDuration = 10.0
expectedRate = 0
[[test.workload]]
testName = 'RandomClogging'
testDuration = 10.0
[[test.workload]]
testName = 'Rollback'
meanDelay = 10.0
testDuration = 10.0
[[test.workload]]
testName = 'Attrition'
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 10.0
[[test.workload]]
testName = 'Attrition'
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 10.0
[[test.workload]]
testName = 'SaveAndKill'
restartInfoLocation = 'simfdb/restartInfo.ini'
testDuration = 10.0

View File

@ -1,36 +0,0 @@
storageEngineExcludeTypes=-1,-2,3
maxTLogVersion=6
disableTss=true
disableHostname=true
disableEncryption=true
testTitle=Clogged
clearAfterTest=false
testName=Cycle
transactionsPerSecond=500.0
nodeCount=2500
testDuration=10.0
expectedRate=0
testName=RandomClogging
testDuration=10.0
testName=Rollback
meanDelay=10.0
testDuration=10.0
testName=Attrition
machinesToKill=10
machinesToLeave=3
reboot=true
testDuration=10.0
testName=Attrition
machinesToKill=10
machinesToLeave=3
reboot=true
testDuration=10.0
testName=SaveAndKill
restartInfoLocation=simfdb/restartInfo.ini
testDuration=10.0

View File

@ -0,0 +1,36 @@
[configuration]
maxTLogVersion = 6
disableTss = true
[[test]]
testTitle = 'Clogged'
runSetup = false
[[test.workload]]
testName = 'Cycle'
transactionsPerSecond = 2500.0
nodeCount = 2500
testDuration = 10.0
expectedRate = 0
[[test.workload]]
testName = 'RandomClogging'
testDuration = 10.0
[[test.workload]]
testName = 'Rollback'
meanDelay = 10.0
testDuration = 10.0
[[test.workload]]
testName = 'Attrition'
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 10.0
[[test.workload]]
testName = 'Attrition'
machinesToKill = 10
machinesToLeave = 3
reboot = true

View File

@ -1,28 +0,0 @@
storageEngineExcludeTypes=-1,-2
maxTLogVersion=6
disableTss=true
testTitle=Clogged
runSetup=false
testName=Cycle
transactionsPerSecond=2500.0
nodeCount=2500
testDuration=10.0
expectedRate=0
testName=RandomClogging
testDuration=10.0
testName=Rollback
meanDelay=10.0
testDuration=10.0
testName=Attrition
machinesToKill=10
machinesToLeave=3
reboot=true
testDuration=10.0
testName=Attrition
machinesToKill=10
machinesToLeave=3
reboot=true