Merge branch 'main' of github.com:apple/foundationdb into network-disable-bypass

Jon Fu 2022-09-20 09:29:29 -07:00
commit 1f778f9d76
465 changed files with 10565 additions and 6498 deletions

View File

@ -22,6 +22,11 @@ else()
cmake_minimum_required(VERSION 3.13)
endif()
# silence deprecation warnings in newer versions of cmake
if(POLICY CMP0135)
cmake_policy(SET CMP0135 NEW)
endif()
project(foundationdb
VERSION 7.2.0
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."

View File

@ -49,6 +49,17 @@ from bindingtester.known_testers import Tester
import fdb
import fdb.tuple
API_VERSIONS = [
13, 14, 16, 21, 22, 23,
100, 200, 300,
400, 410, 420, 430, 440, 450, 460,
500, 510, 520,
600, 610, 620, 630,
700, 710, 720,
]
fdb.api_version(FDB_API_VERSION)
@ -156,8 +167,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
elif random.random() < 0.7:
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710, 720] if v >= min_version and v <= max_version])
api_version = random.choice([v for v in API_VERSIONS if v >= min_version and v <= max_version])
else:
api_version = random.randint(min_version, max_version)

View File

@ -129,7 +129,7 @@ if(NOT WIN32)
add_library(fdb_cpp INTERFACE test/fdb_api.hpp)
target_sources(fdb_cpp INTERFACE )
target_include_directories(fdb_cpp INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/test)
target_link_libraries(fdb_cpp INTERFACE fmt::fmt)
target_link_libraries(fdb_cpp INTERFACE fdb_c fmt::fmt)
set(API_TESTER_SRCS
test/apitester/fdb_c_api_tester.cpp
@ -199,6 +199,9 @@ if(NOT WIN32)
target_include_directories(fdb_c_api_tester_impl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/ ${CMAKE_SOURCE_DIR}/flow/include ${CMAKE_BINARY_DIR}/flow/include)
target_link_libraries(fdb_c_api_tester_impl PRIVATE fdb_cpp toml11_target Threads::Threads fmt::fmt boost_target)
if (NOT APPLE)
target_link_libraries(fdb_c_api_tester_impl PRIVATE stdc++fs)
endif()
target_link_libraries(fdb_c_api_tester_impl PRIVATE SimpleOpt)
target_include_directories(fdb_c_unit_tests_impl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/)

View File

@ -84,12 +84,12 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_network_set_option(FDBNetworkOption
int value_length);
#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network(void);
#endif
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network(void);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network(void);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*),
void* hook_parameter);
@ -548,8 +548,8 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_summarize_blob_granules(
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version);
DLLEXPORT int fdb_get_max_api_version();
DLLEXPORT const char* fdb_get_client_version();
DLLEXPORT int fdb_get_max_api_version(void);
DLLEXPORT const char* fdb_get_client_version(void);
/* LEGACY API VERSIONS */
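The `(void)` changes above matter only when the header is consumed from C: an empty parameter list in a C prototype leaves the arguments unspecified, while `(void)` declares that the function takes none (in C++ the two forms are equivalent). Below is a minimal sketch, not part of this commit, of driving these network functions from C++; it assumes libfdb_c is linked and the API version 720 headers are in use.

#include <cstdio>
#include <thread>

#define FDB_API_VERSION 720
#include <foundationdb/fdb_c.h>

int main() {
    if (fdb_select_api_version(FDB_API_VERSION)) return 1; // macro wrapping fdb_select_api_version_impl
    if (fdb_setup_network()) return 1;                      // declared as fdb_setup_network(void)
    std::thread net([] { (void)fdb_run_network(); });       // blocks until fdb_stop_network()
    // ... open databases and run transactions here ...
    if (fdb_stop_network()) return 1;
    net.join();
    std::printf("client %s, max API version %d\n", fdb_get_client_version(), fdb_get_max_api_version());
    return 0;
}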

View File

@ -41,6 +41,10 @@ ApiWorkload::ApiWorkload(const WorkloadConfig& config) : WorkloadBase(config) {
stopReceived = false;
checkingProgress = false;
apiVersion = config.apiVersion;
for (int i = 0; i < config.numTenants; ++i) {
tenants.push_back(fdb::ByteString(fdb::toBytesRef("tenant" + std::to_string(i))));
}
}
IWorkloadControlIfc* ApiWorkload::getControlIfc() {
@ -107,49 +111,57 @@ void ApiWorkload::randomOperation(TTaskFct cont) {
}
fdb::Key ApiWorkload::randomKeyName() {
return keyPrefix + Random::get().randomStringLowerCase(minKeyLength, maxKeyLength);
return keyPrefix + Random::get().randomByteStringLowerCase(minKeyLength, maxKeyLength);
}
fdb::Value ApiWorkload::randomValue() {
return Random::get().randomStringLowerCase(minValueLength, maxValueLength);
return Random::get().randomByteStringLowerCase(minValueLength, maxValueLength);
}
fdb::Key ApiWorkload::randomNotExistingKey() {
fdb::Key ApiWorkload::randomNotExistingKey(std::optional<int> tenantId) {
while (true) {
fdb::Key key = randomKeyName();
if (!store.exists(key)) {
if (!stores[tenantId].exists(key)) {
return key;
}
}
}
fdb::Key ApiWorkload::randomExistingKey() {
fdb::Key ApiWorkload::randomExistingKey(std::optional<int> tenantId) {
fdb::Key genKey = randomKeyName();
fdb::Key key = store.getKey(genKey, true, 1);
if (key != store.endKey()) {
fdb::Key key = stores[tenantId].getKey(genKey, true, 1);
if (key != stores[tenantId].endKey()) {
return key;
}
key = store.getKey(genKey, true, 0);
if (key != store.startKey()) {
key = stores[tenantId].getKey(genKey, true, 0);
if (key != stores[tenantId].startKey()) {
return key;
}
info("No existing key found, using a new random key.");
return genKey;
}
fdb::Key ApiWorkload::randomKey(double existingKeyRatio) {
fdb::Key ApiWorkload::randomKey(double existingKeyRatio, std::optional<int> tenantId) {
if (Random::get().randomBool(existingKeyRatio)) {
return randomExistingKey();
return randomExistingKey(tenantId);
} else {
return randomNotExistingKey();
return randomNotExistingKey(tenantId);
}
}
void ApiWorkload::populateDataTx(TTaskFct cont) {
std::optional<int> ApiWorkload::randomTenant() {
if (tenants.size() > 0) {
return Random::get().randomInt(0, tenants.size() - 1);
} else {
return {};
}
}
void ApiWorkload::populateDataTx(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = maxKeysPerTransaction;
auto kvPairs = std::make_shared<std::vector<fdb::KeyValue>>();
for (int i = 0; i < numKeys; i++) {
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(), randomValue() });
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(tenantId), randomValue() });
}
execTransaction(
[kvPairs](auto ctx) {
@ -158,12 +170,29 @@ void ApiWorkload::populateDataTx(TTaskFct cont) {
}
ctx->commit();
},
[this, kvPairs, cont]() {
[this, tenantId, kvPairs, cont]() {
for (const fdb::KeyValue& kv : *kvPairs) {
store.set(kv.key, kv.value);
stores[tenantId].set(kv.key, kv.value);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void ApiWorkload::clearTenantData(TTaskFct cont, std::optional<int> tenantId) {
execTransaction(
[this](auto ctx) {
ctx->tx().clearRange(keyPrefix, keyPrefix + fdb::Key(1, '\xff'));
ctx->commit();
},
[this, tenantId, cont]() {
if (tenantId && tenantId.value() < tenants.size() - 1) {
clearTenantData(cont, tenantId.value() + 1);
} else {
schedule(cont);
}
},
getTenant(tenantId));
}
void ApiWorkload::clearData(TTaskFct cont) {
@ -175,20 +204,51 @@ void ApiWorkload::clearData(TTaskFct cont) {
[this, cont]() { schedule(cont); });
}
void ApiWorkload::populateData(TTaskFct cont) {
if (store.size() < initialSize) {
populateDataTx([this, cont]() { populateData(cont); });
} else {
void ApiWorkload::populateTenantData(TTaskFct cont, std::optional<int> tenantId) {
while (stores[tenantId].size() >= initialSize && tenantId && tenantId.value() < tenants.size()) {
++tenantId.value();
}
if (tenantId >= tenants.size() || stores[tenantId].size() >= initialSize) {
info("Data population completed");
schedule(cont);
} else {
populateDataTx([this, cont, tenantId]() { populateTenantData(cont, tenantId); }, tenantId);
}
}
void ApiWorkload::randomInsertOp(TTaskFct cont) {
void ApiWorkload::createTenants(TTaskFct cont) {
execTransaction(
[this](auto ctx) {
auto futures = std::make_shared<std::vector<fdb::Future>>();
for (auto tenant : tenants) {
futures->push_back(fdb::Tenant::getTenant(ctx->tx(), tenant));
}
ctx->continueAfterAll(*futures, [this, ctx, futures]() {
for (int i = 0; i < futures->size(); ++i) {
if (!(*futures)[i].get<fdb::future_var::ValueRef>()) {
fdb::Tenant::createTenant(ctx->tx(), tenants[i]);
}
}
ctx->commit();
});
},
[this, cont]() { schedule(cont); });
}
void ApiWorkload::populateData(TTaskFct cont) {
if (tenants.size() > 0) {
createTenants([this, cont]() { populateTenantData(cont, std::make_optional(0)); });
} else {
populateTenantData(cont, {});
}
}
void ApiWorkload::randomInsertOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto kvPairs = std::make_shared<std::vector<fdb::KeyValue>>();
for (int i = 0; i < numKeys; i++) {
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(), randomValue() });
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(tenantId), randomValue() });
}
execTransaction(
[kvPairs](auto ctx) {
@ -197,19 +257,20 @@ void ApiWorkload::randomInsertOp(TTaskFct cont) {
}
ctx->commit();
},
[this, kvPairs, cont]() {
[this, kvPairs, cont, tenantId]() {
for (const fdb::KeyValue& kv : *kvPairs) {
store.set(kv.key, kv.value);
stores[tenantId].set(kv.key, kv.value);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void ApiWorkload::randomClearOp(TTaskFct cont) {
void ApiWorkload::randomClearOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomExistingKey());
keys->push_back(randomExistingKey(tenantId));
}
execTransaction(
[keys](auto ctx) {
@ -218,15 +279,16 @@ void ApiWorkload::randomClearOp(TTaskFct cont) {
}
ctx->commit();
},
[this, keys, cont]() {
[this, keys, cont, tenantId]() {
for (const auto& key : *keys) {
store.clear(key);
stores[tenantId].clear(key);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void ApiWorkload::randomClearRangeOp(TTaskFct cont) {
void ApiWorkload::randomClearRangeOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -237,10 +299,19 @@ void ApiWorkload::randomClearRangeOp(TTaskFct cont) {
ctx->tx().clearRange(begin, end);
ctx->commit();
},
[this, begin, end, cont]() {
store.clear(begin, end);
[this, begin, end, cont, tenantId]() {
stores[tenantId].clear(begin, end);
schedule(cont);
});
},
getTenant(tenantId));
}
std::optional<fdb::BytesRef> ApiWorkload::getTenant(std::optional<int> tenantId) {
if (tenantId) {
return tenants[*tenantId];
} else {
return {};
}
}
} // namespace FdbApiTester
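The workload now keeps one expected-state store per tenant, keyed by std::optional<int> with std::nullopt meaning "no tenant". The following standalone sketch, not part of this commit, shows that keying scheme; the Store type is a stand-in for the tester's KeyValueStore.

#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <unordered_map>

struct Store { // stand-in for the tester's KeyValueStore
    std::map<std::string, std::string> kv;
    void set(const std::string& k, const std::string& v) { kv[k] = v; }
    size_t size() const { return kv.size(); }
};

int main() {
    // std::hash<std::optional<int>> is provided by <optional> since C++17,
    // so an optional tenant id can key the map directly.
    std::unordered_map<std::optional<int>, Store> stores;
    stores[std::nullopt].set("a", "1"); // tenant-less workload
    stores[0].set("a", "2");            // tenant 0
    stores[1].set("b", "3");            // tenant 1
    std::cout << stores[std::nullopt].size() << " " << stores[0].size() << "\n"; // prints: 1 1
    return 0;
}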

View File

@ -96,17 +96,23 @@ protected:
// Key prefix
fdb::Key keyPrefix;
// The tenants to configure in the cluster
std::vector<fdb::ByteString> tenants;
// In-memory store maintaining expected database state
KeyValueStore store;
std::unordered_map<std::optional<int>, KeyValueStore> stores;
ApiWorkload(const WorkloadConfig& config);
// Methods for generating random keys and values
fdb::Key randomKeyName();
fdb::Value randomValue();
fdb::Key randomNotExistingKey();
fdb::Key randomExistingKey();
fdb::Key randomKey(double existingKeyRatio);
fdb::Key randomNotExistingKey(std::optional<int> tenantId);
fdb::Key randomExistingKey(std::optional<int> tenantId);
fdb::Key randomKey(double existingKeyRatio, std::optional<int> tenantId);
// Chooses a random tenant from the available tenants (or an empty optional if tenants aren't used in the test)
std::optional<int> randomTenant();
// Generate initial random data for the workload
void populateData(TTaskFct cont);
@ -115,12 +121,18 @@ protected:
void clearData(TTaskFct cont);
// common operations
void randomInsertOp(TTaskFct cont);
void randomClearOp(TTaskFct cont);
void randomClearRangeOp(TTaskFct cont);
void randomInsertOp(TTaskFct cont, std::optional<int> tenantId);
void randomClearOp(TTaskFct cont, std::optional<int> tenantId);
void randomClearRangeOp(TTaskFct cont, std::optional<int> tenantId);
std::optional<fdb::BytesRef> getTenant(std::optional<int> tenantId);
private:
void populateDataTx(TTaskFct cont);
void populateDataTx(TTaskFct cont, std::optional<int> tenantId);
void populateTenantData(TTaskFct cont, std::optional<int> tenantId);
void createTenants(TTaskFct cont);
void clearTenantData(TTaskFct cont, std::optional<int> tenantId);
void randomOperations();
};

View File

@ -36,14 +36,23 @@ public:
private:
// FIXME: use other new blob granule apis!
enum OpType { OP_INSERT, OP_CLEAR, OP_CLEAR_RANGE, OP_READ, OP_GET_RANGES, OP_SUMMARIZE, OP_LAST = OP_SUMMARIZE };
enum OpType {
OP_INSERT,
OP_CLEAR,
OP_CLEAR_RANGE,
OP_READ,
OP_GET_GRANULES,
OP_SUMMARIZE,
OP_GET_BLOB_RANGES,
OP_LAST = OP_GET_BLOB_RANGES
};
std::vector<OpType> excludedOpTypes;
// Allow reads at the start to get blob_granule_transaction_too_old if BG data isn't initialized yet
// FIXME: should still guarantee a read succeeds eventually somehow
bool seenReadSuccess = false;
void randomReadOp(TTaskFct cont) {
void randomReadOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -82,9 +91,10 @@ private:
ctx->done();
}
},
[this, begin, end, results, tooOld, cont]() {
[this, begin, end, results, tooOld, cont, tenantId]() {
if (!*tooOld) {
std::vector<fdb::KeyValue> expected = store.getRange(begin, end, store.size(), false);
std::vector<fdb::KeyValue> expected =
stores[tenantId].getRange(begin, end, stores[tenantId].size(), false);
if (results->size() != expected.size()) {
error(fmt::format("randomReadOp result size mismatch. expected: {} actual: {}",
expected.size(),
@ -115,10 +125,11 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void randomGetRangesOp(TTaskFct cont) {
void randomGetGranulesOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -138,41 +149,13 @@ private:
true);
},
[this, begin, end, results, cont]() {
if (seenReadSuccess) {
ASSERT(results->size() > 0);
ASSERT(results->front().beginKey <= begin);
ASSERT(results->back().endKey >= end);
}
for (int i = 0; i < results->size(); i++) {
// no empty or inverted ranges
if ((*results)[i].beginKey >= (*results)[i].endKey) {
error(fmt::format("Empty/inverted range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey < (*results)[i].endKey);
}
for (int i = 1; i < results->size(); i++) {
// ranges contain entire requested key range
if ((*results)[i].beginKey != (*results)[i].endKey) {
error(fmt::format("Non-contiguous range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey == (*results)[i - 1].endKey);
}
this->validateRanges(results, begin, end, seenReadSuccess);
schedule(cont);
});
},
getTenant(tenantId));
}
void randomSummarizeOp(TTaskFct cont) {
void randomSummarizeOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -211,32 +194,95 @@ private:
}
schedule(cont);
});
},
getTenant(tenantId));
}
void validateRanges(std::shared_ptr<std::vector<fdb::KeyRange>> results,
fdb::Key begin,
fdb::Key end,
bool shouldBeRanges) {
if (shouldBeRanges) {
ASSERT(results->size() > 0);
ASSERT(results->front().beginKey <= begin);
ASSERT(results->back().endKey >= end);
}
for (int i = 0; i < results->size(); i++) {
// no empty or inverted ranges
if ((*results)[i].beginKey >= (*results)[i].endKey) {
error(fmt::format("Empty/inverted range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey < (*results)[i].endKey);
}
for (int i = 1; i < results->size(); i++) {
// ranges contain entire requested key range
if ((*results)[i].beginKey != (*results)[i].endKey) {
error(fmt::format("Non-contiguous range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey == (*results)[i - 1].endKey);
}
}
void randomGetBlobRangesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
auto results = std::make_shared<std::vector<fdb::KeyRange>>();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end, results](auto ctx) {
fdb::Future f = ctx->db().listBlobbifiedRanges(begin, end, 1000).eraseType();
ctx->continueAfter(f, [ctx, f, results]() {
*results = copyKeyRangeArray(f.get<fdb::future_var::KeyRangeRefArray>());
ctx->done();
});
},
[this, begin, end, results, cont]() {
this->validateRanges(results, begin, end, seenReadSuccess);
schedule(cont);
},
/* failOnError = */ false);
}
void randomOperation(TTaskFct cont) {
OpType txType = (store.size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
std::optional<int> tenantId = randomTenant();
OpType txType = (stores[tenantId].size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
while (std::count(excludedOpTypes.begin(), excludedOpTypes.end(), txType)) {
txType = (OpType)Random::get().randomInt(0, OP_LAST);
}
switch (txType) {
case OP_INSERT:
randomInsertOp(cont);
randomInsertOp(cont, tenantId);
break;
case OP_CLEAR:
randomClearOp(cont);
randomClearOp(cont, tenantId);
break;
case OP_CLEAR_RANGE:
randomClearRangeOp(cont);
randomClearRangeOp(cont, tenantId);
break;
case OP_READ:
randomReadOp(cont);
randomReadOp(cont, tenantId);
break;
case OP_GET_RANGES:
randomGetRangesOp(cont);
case OP_GET_GRANULES:
randomGetGranulesOp(cont, tenantId);
break;
case OP_SUMMARIZE:
randomSummarizeOp(cont);
randomSummarizeOp(cont, tenantId);
break;
case OP_GET_BLOB_RANGES:
randomGetBlobRangesOp(cont);
break;
}
}
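A small self-contained sketch, not from this commit, of the operation-selection pattern used in randomOperation above: draw a random op in [0, OP_LAST] and re-draw while it is excluded (this assumes the exclusion list never covers every op).

#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

enum OpType { OP_INSERT, OP_CLEAR, OP_READ, OP_SUMMARIZE, OP_LAST = OP_SUMMARIZE };

OpType pickOp(const std::vector<OpType>& excluded, std::mt19937& rng) {
    std::uniform_int_distribution<int> dist(0, OP_LAST);
    OpType op = (OpType)dist(rng);
    while (std::count(excluded.begin(), excluded.end(), op)) {
        op = (OpType)dist(rng); // re-roll until an allowed op comes up
    }
    return op;
}

int main() {
    std::mt19937 rng(std::random_device{}());
    std::vector<OpType> excluded = { OP_SUMMARIZE };
    std::cout << "picked op " << (int)pickOp(excluded, rng) << "\n";
    return 0;
}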

View File

@ -78,7 +78,7 @@ private:
seenReadSuccess = true;
}
if (err.code() != expectedError) {
info(fmt::format("incorrect error. Expected {}, Got {}", err.code(), expectedError));
info(fmt::format("incorrect error. Expected {}, Got {}", expectedError, err.code()));
if (err.code() == error_code_blob_granule_transaction_too_old) {
ASSERT(!seenReadSuccess);
ctx->done();

View File

@ -31,11 +31,11 @@ private:
enum OpType { OP_CANCEL_GET, OP_CANCEL_AFTER_FIRST_GET, OP_LAST = OP_CANCEL_AFTER_FIRST_GET };
// Start multiple concurrent gets and cancel the transaction
void randomCancelGetTx(TTaskFct cont) {
void randomCancelGetTx(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomKey(readExistingKeysRatio));
keys->push_back(randomKey(readExistingKeysRatio, tenantId));
}
execTransaction(
[keys](auto ctx) {
@ -45,25 +45,26 @@ private:
}
ctx->done();
},
[this, cont]() { schedule(cont); });
[this, cont]() { schedule(cont); },
getTenant(tenantId));
}
// Start multiple concurrent gets and cancel the transaction after the first get returns
void randomCancelAfterFirstResTx(TTaskFct cont) {
void randomCancelAfterFirstResTx(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomKey(readExistingKeysRatio));
keys->push_back(randomKey(readExistingKeysRatio, tenantId));
}
execTransaction(
[this, keys](auto ctx) {
[this, keys, tenantId](auto ctx) {
std::vector<fdb::Future> futures;
for (const auto& key : *keys) {
futures.push_back(ctx->tx().get(key, false).eraseType());
}
for (int i = 0; i < keys->size(); i++) {
fdb::Future f = futures[i];
auto expectedVal = store.get((*keys)[i]);
auto expectedVal = stores[tenantId].get((*keys)[i]);
ctx->continueAfter(f, [expectedVal, f, this, ctx]() {
auto val = f.get<fdb::future_var::ValueRef>();
if (expectedVal != val) {
@ -75,17 +76,20 @@ private:
});
}
},
[this, cont]() { schedule(cont); });
[this, cont]() { schedule(cont); },
getTenant(tenantId));
}
void randomOperation(TTaskFct cont) override {
std::optional<int> tenantId = randomTenant();
OpType txType = (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
case OP_CANCEL_GET:
randomCancelGetTx(cont);
randomCancelGetTx(cont, tenantId);
break;
case OP_CANCEL_AFTER_FIRST_GET:
randomCancelAfterFirstResTx(cont);
randomCancelAfterFirstResTx(cont, tenantId);
break;
}
}

View File

@ -41,11 +41,11 @@ private:
OP_LAST = OP_COMMIT_READ
};
void randomCommitReadOp(TTaskFct cont) {
void randomCommitReadOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto kvPairs = std::make_shared<std::vector<fdb::KeyValue>>();
for (int i = 0; i < numKeys; i++) {
kvPairs->push_back(fdb::KeyValue{ randomKey(readExistingKeysRatio), randomValue() });
kvPairs->push_back(fdb::KeyValue{ randomKey(readExistingKeysRatio, tenantId), randomValue() });
}
execTransaction(
[kvPairs](auto ctx) {
@ -54,9 +54,9 @@ private:
}
ctx->commit();
},
[this, kvPairs, cont]() {
[this, kvPairs, cont, tenantId]() {
for (const fdb::KeyValue& kv : *kvPairs) {
store.set(kv.key, kv.value);
stores[tenantId].set(kv.key, kv.value);
}
auto results = std::make_shared<std::vector<std::optional<fdb::Value>>>();
execTransaction(
@ -78,10 +78,10 @@ private:
ctx->done();
});
},
[this, kvPairs, results, cont]() {
[this, kvPairs, results, cont, tenantId]() {
ASSERT(results->size() == kvPairs->size());
for (int i = 0; i < kvPairs->size(); i++) {
auto expected = store.get((*kvPairs)[i].key);
auto expected = stores[tenantId].get((*kvPairs)[i].key);
auto actual = (*results)[i];
if (actual != expected) {
error(
@ -93,16 +93,18 @@ private:
}
}
schedule(cont);
});
});
},
getTenant(tenantId));
},
getTenant(tenantId));
}
void randomGetOp(TTaskFct cont) {
void randomGetOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
auto results = std::make_shared<std::vector<std::optional<fdb::Value>>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomKey(readExistingKeysRatio));
keys->push_back(randomKey(readExistingKeysRatio, tenantId));
}
execTransaction(
[keys, results](auto ctx) {
@ -119,10 +121,10 @@ private:
ctx->done();
});
},
[this, keys, results, cont]() {
[this, keys, results, cont, tenantId]() {
ASSERT(results->size() == keys->size());
for (int i = 0; i < keys->size(); i++) {
auto expected = store.get((*keys)[i]);
auto expected = stores[tenantId].get((*keys)[i]);
if ((*results)[i] != expected) {
error(fmt::format("randomGetOp mismatch. key: {} expected: {:.80} actual: {:.80}",
fdb::toCharsRef((*keys)[i]),
@ -131,16 +133,17 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void randomGetKeyOp(TTaskFct cont) {
void randomGetKeyOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keysWithSelectors = std::make_shared<std::vector<std::pair<fdb::Key, fdb::KeySelector>>>();
auto results = std::make_shared<std::vector<fdb::Key>>();
keysWithSelectors->reserve(numKeys);
for (int i = 0; i < numKeys; i++) {
auto key = randomKey(readExistingKeysRatio);
auto key = randomKey(readExistingKeysRatio, tenantId);
fdb::KeySelector selector;
selector.keyLength = key.size();
selector.orEqual = Random::get().randomBool(0.5);
@ -169,20 +172,20 @@ private:
ctx->done();
});
},
[this, keysWithSelectors, results, cont]() {
[this, keysWithSelectors, results, cont, tenantId]() {
ASSERT(results->size() == keysWithSelectors->size());
for (int i = 0; i < keysWithSelectors->size(); i++) {
auto const& key = (*keysWithSelectors)[i].first;
auto const& selector = (*keysWithSelectors)[i].second;
auto expected = store.getKey(key, selector.orEqual, selector.offset);
auto expected = stores[tenantId].getKey(key, selector.orEqual, selector.offset);
auto actual = (*results)[i];
// Local store only contains data for the current client, while fdb contains data from multiple
// clients. If getKey returned a key outside of the range for the current client, adjust the result
// to match what would be expected in the local store.
if (actual.substr(0, keyPrefix.size()) < keyPrefix) {
actual = store.startKey();
actual = stores[tenantId].startKey();
} else if ((*results)[i].substr(0, keyPrefix.size()) > keyPrefix) {
actual = store.endKey();
actual = stores[tenantId].endKey();
}
if (actual != expected) {
error(fmt::format("randomGetKeyOp mismatch. key: {}, orEqual: {}, offset: {}, expected: {} "
@ -195,37 +198,38 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void getRangeLoop(std::shared_ptr<ITransactionContext> ctx,
fdb::KeySelector begin,
fdb::KeySelector end,
fdb::Key endKey,
std::shared_ptr<std::vector<fdb::KeyValue>> results) {
auto f = ctx->tx().getRange(begin,
end,
fdb::key_select::firstGreaterOrEqual(endKey),
0 /*limit*/,
0 /*target_bytes*/,
FDB_STREAMING_MODE_WANT_ALL,
0 /*iteration*/,
false /*snapshot*/,
false /*reverse*/);
ctx->continueAfter(f, [this, ctx, f, end, results]() {
ctx->continueAfter(f, [this, ctx, f, endKey, results]() {
auto out = copyKeyValueArray(f.get());
results->insert(results->end(), out.first.begin(), out.first.end());
const bool more = out.second;
if (more) {
// Fetch the remaining results.
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), end, results);
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), endKey, results);
} else {
ctx->done();
}
});
}
void randomGetRangeOp(TTaskFct cont) {
auto begin = randomKey(readExistingKeysRatio);
auto end = randomKey(readExistingKeysRatio);
void randomGetRangeOp(TTaskFct cont, std::optional<int> tenantId) {
auto begin = randomKey(readExistingKeysRatio, tenantId);
auto end = randomKey(readExistingKeysRatio, tenantId);
auto results = std::make_shared<std::vector<fdb::KeyValue>>();
execTransaction(
@ -233,13 +237,10 @@ private:
// Clear the results vector, in case the transaction is retried.
results->clear();
getRangeLoop(ctx,
fdb::key_select::firstGreaterOrEqual(begin),
fdb::key_select::firstGreaterOrEqual(end),
results);
getRangeLoop(ctx, fdb::key_select::firstGreaterOrEqual(begin), end, results);
},
[this, begin, end, results, cont]() {
auto expected = store.getRange(begin, end, results->size() + 10, false);
[this, begin, end, results, cont, tenantId]() {
auto expected = stores[tenantId].getRange(begin, end, results->size() + 10, false);
if (results->size() != expected.size()) {
error(fmt::format("randomGetRangeOp mismatch. expected {} keys, actual {} keys",
expected.size(),
@ -260,32 +261,35 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void randomOperation(TTaskFct cont) {
OpType txType = (store.size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
std::optional<int> tenantId = randomTenant();
OpType txType = (stores[tenantId].size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
case OP_INSERT:
randomInsertOp(cont);
randomInsertOp(cont, tenantId);
break;
case OP_GET:
randomGetOp(cont);
randomGetOp(cont, tenantId);
break;
case OP_GET_KEY:
randomGetKeyOp(cont);
randomGetKeyOp(cont, tenantId);
break;
case OP_CLEAR:
randomClearOp(cont);
randomClearOp(cont, tenantId);
break;
case OP_GET_RANGE:
randomGetRangeOp(cont);
randomGetRangeOp(cont, tenantId);
break;
case OP_CLEAR_RANGE:
randomClearRangeOp(cont);
randomClearRangeOp(cont, tenantId);
break;
case OP_COMMIT_READ:
randomCommitReadOp(cont);
randomCommitReadOp(cont, tenantId);
break;
}
}
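The getRangeLoop change above fixes the resume point of a paginated range read: each batch must continue strictly after the last returned key while the requested end of the range stays fixed. Here is a self-contained sketch of the same pattern against a toy in-memory map; it is illustrative only and not part of this commit.

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Toy backend: an ordered map plays the role of the database.
std::map<std::string, std::string> db = { {"a","1"}, {"b","2"}, {"c","3"}, {"d","4"}, {"e","5"} };

struct Batch { std::vector<std::pair<std::string, std::string>> kvs; bool more = false; };

Batch fetchBatch(const std::string& begin, const std::string& end, size_t limit) {
    Batch b;
    for (auto it = db.lower_bound(begin); it != db.end() && it->first < end; ++it) {
        if (b.kvs.size() == limit) { b.more = true; break; }
        b.kvs.push_back(*it);
    }
    return b;
}

int main() {
    std::string begin = "a", end = "z";
    std::vector<std::pair<std::string, std::string>> out;
    while (true) {
        Batch b = fetchBatch(begin, end, 2); // small batch to force pagination
        out.insert(out.end(), b.kvs.begin(), b.kvs.end());
        if (!b.more) break;
        begin = out.back().first + '\x00'; // resume strictly after the last key; `end` stays fixed
    }
    std::cout << out.size() << " keys\n"; // prints: 5 keys
    return 0;
}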

View File

@ -35,8 +35,8 @@ public:
void start() override { setAndGet(NO_OP_TASK); }
void setAndGet(TTaskFct cont) {
fdb::Key key = keyPrefix + random.randomStringLowerCase(10, 100);
fdb::Value value = random.randomStringLowerCase(10, 1000);
fdb::Key key = keyPrefix + random.randomByteStringLowerCase(10, 100);
fdb::Value value = random.randomByteStringLowerCase(10, 1000);
execTransaction(
[key, value](auto ctx) {
ctx->tx().set(key, value);

View File

@ -49,6 +49,7 @@ public:
int numClientThreads;
int numDatabases;
int numClients;
int numTenants = -1;
int statsIntervalMs = 0;
std::vector<std::pair<std::string, std::string>> knobs;
TestSpec testSpec;

View File

@ -65,6 +65,10 @@ std::unordered_map<std::string, std::function<void(const std::string& value, Tes
[](const std::string& value, TestSpec* spec) { //
spec->databasePerTransaction = (value == "true");
} },
{ "tamperClusterFile",
[](const std::string& value, TestSpec* spec) { //
spec->tamperClusterFile = (value == "true");
} },
{ "minFdbThreads",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "minFdbThreads", spec->minFdbThreads, 1, 1000);
@ -100,6 +104,14 @@ std::unordered_map<std::string, std::function<void(const std::string& value, Tes
{ "disableClientBypass",
[](const std::string& value, TestSpec* spec) { //
spec->disableClientBypass = (value == "true");
} },
{ "minTenants",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "minTenants", spec->minTenants, 1, 1000);
} },
{ "maxTenants",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "maxTenants", spec->maxTenants, 1, 1000);
} }
};

View File

@ -58,6 +58,9 @@ struct TestSpec {
// Execute each transaction in a separate database instance
bool databasePerTransaction = false;
// Test tampering with the cluster file
bool tamperClusterFile = false;
// Size of the FDB client thread pool (a random number in the [min,max] range)
int minFdbThreads = 1;
int maxFdbThreads = 1;
@ -78,6 +81,9 @@ struct TestSpec {
// Disable the ability to bypass the MVC API, for
// cases when there are no external clients
bool disableClientBypass = false;
// Number of tenants (a random number in the [min,max] range)
int minTenants = 0;
int maxTenants = 0;
// List of workloads with their options
std::vector<WorkloadSpec> workloads;

View File

@ -23,25 +23,23 @@
#include "foundationdb/fdb_c_types.h"
#include "test/apitester/TesterScheduler.h"
#include "test/fdb_api.hpp"
#include <cstddef>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <mutex>
#include <atomic>
#include <chrono>
#include <thread>
#include <fmt/format.h>
#include <filesystem>
namespace FdbApiTester {
constexpr int LONG_WAIT_TIME_US = 2000000;
constexpr int LARGE_NUMBER_OF_RETRIES = 10;
void TransactionActorBase::complete(fdb::Error err) {
error = err;
context = {};
}
void ITransactionContext::continueAfterAll(std::vector<fdb::Future> futures, TTaskFct cont) {
auto counter = std::make_shared<std::atomic<int>>(futures.size());
auto errorCode = std::make_shared<std::atomic<fdb::Error>>(fdb::Error::success());
@ -73,22 +71,32 @@ void ITransactionContext::continueAfterAll(std::vector<fdb::Future> futures, TTa
class TransactionContextBase : public ITransactionContext {
public:
TransactionContextBase(ITransactionExecutor* executor,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath)
: executor(executor), txActor(txActor), contAfterDone(cont), scheduler(scheduler), retryLimit(retryLimit),
txState(TxState::IN_PROGRESS), commitCalled(false), bgBasePath(bgBasePath) {
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: executor(executor), startFct(startFct), contAfterDone(cont), scheduler(scheduler), retryLimit(retryLimit),
txState(TxState::IN_PROGRESS), commitCalled(false), bgBasePath(bgBasePath), tenantName(tenantName),
transactional(transactional) {
databaseCreateErrorInjected = executor->getOptions().injectDatabaseCreateErrors &&
Random::get().randomBool(executor->getOptions().databaseCreateErrorRatio);
fdb::Database db;
if (databaseCreateErrorInjected) {
db = fdb::Database("not_existing_file");
fdbDb = fdb::Database(executor->getClusterFileForErrorInjection());
} else {
db = executor->selectDatabase();
fdbDb = executor->selectDatabase();
}
if (transactional) {
if (tenantName) {
fdb::Tenant tenant = fdbDb.openTenant(*tenantName);
fdbTx = tenant.createTransaction();
} else {
fdbTx = fdbDb.createTransaction();
}
}
fdbTx = db.createTransaction();
}
virtual ~TransactionContextBase() { ASSERT(txState == TxState::DONE); }
@ -97,7 +105,9 @@ public:
// IN_PROGRESS -> (ON_ERROR -> IN_PROGRESS)* [-> ON_ERROR] -> DONE
enum class TxState { IN_PROGRESS, ON_ERROR, DONE };
fdb::Transaction tx() override { return fdbTx; }
fdb::Database db() override { return fdbDb.atomic_load(); }
fdb::Transaction tx() override { return fdbTx.atomic_load(); }
// Set a continuation to be executed when a future gets ready
void continueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -106,6 +116,7 @@ public:
// Complete the transaction with a commit
void commit() override {
ASSERT(transactional);
std::unique_lock<std::mutex> lock(mutex);
if (txState != TxState::IN_PROGRESS) {
return;
@ -135,13 +146,15 @@ public:
retriedErrors.size(),
fmt::join(retriedErrorCodes(), ", "));
}
// cancel transaction so that any pending operations on it
// fail gracefully
fdbTx.cancel();
txActor->complete(fdb::Error::success());
cleanUp();
if (transactional) {
// cancel transaction so that any pending operations on it
// fail gracefully
fdbTx.cancel();
cleanUp();
}
ASSERT(txState == TxState::DONE);
contAfterDone();
contAfterDone(fdb::Error::success());
}
std::string getBGBasePath() override { return bgBasePath; }
@ -164,19 +177,29 @@ public:
ASSERT(!onErrorFuture);
if (databaseCreateErrorInjected) {
if (databaseCreateErrorInjected && canBeInjectedDatabaseCreateError(err.code())) {
// Failed to create a database because of failure injection
// Restart by recreating the transaction in a valid database
scheduler->schedule([this]() {
databaseCreateErrorInjected = false;
fdb::Database db = executor->selectDatabase();
fdbTx = db.createTransaction();
restartTransaction();
auto thisRef = std::static_pointer_cast<TransactionContextBase>(shared_from_this());
scheduler->schedule([thisRef]() {
fdb::Database db = thisRef->executor->selectDatabase();
thisRef->fdbDb.atomic_store(db);
if (thisRef->transactional) {
if (thisRef->tenantName) {
fdb::Tenant tenant = db.openTenant(*thisRef->tenantName);
thisRef->fdbTx.atomic_store(tenant.createTransaction());
} else {
thisRef->fdbTx.atomic_store(db.createTransaction());
}
}
thisRef->restartTransaction();
});
} else {
} else if (transactional) {
onErrorArg = err;
onErrorFuture = tx().onError(err);
handleOnErrorFuture();
} else {
transactionFailed(err);
}
}
@ -188,10 +211,16 @@ protected:
// Clean up transaction state after completing the transaction
// Note that the object may live longer, because it is referenced
// by not yet triggered callbacks
virtual void cleanUp() {
void cleanUp() {
ASSERT(txState == TxState::DONE);
ASSERT(!onErrorFuture);
txActor = {};
cancelPendingFutures();
}
virtual void cancelPendingFutures() {}
bool canBeInjectedDatabaseCreateError(fdb::Error::CodeType errCode) {
return errCode == error_code_no_cluster_file_found || errCode == error_code_connection_string_invalid;
}
// Complete the transaction with an (unretriable) error
@ -207,9 +236,8 @@ protected:
// No need for lock from here on, because only one thread
// can enter DONE state and handle it
txActor->complete(err);
cleanUp();
contAfterDone();
contAfterDone(err);
}
// Handle result of a transaction onError call
@ -225,12 +253,13 @@ protected:
}
void restartTransaction() {
std::unique_lock<std::mutex> lock(mutex);
ASSERT(txState == TxState::ON_ERROR);
cancelPendingFutures();
std::unique_lock<std::mutex> lock(mutex);
txState = TxState::IN_PROGRESS;
commitCalled = false;
lock.unlock();
txActor->start();
startFct(shared_from_this());
}
// Checks if a transaction can be retried. Fails the transaction if the check fails
@ -262,13 +291,17 @@ protected:
// Set in constructor, stays immutable
ITransactionExecutor* const executor;
// FDB database
// Provides a thread safe interface by itself (no need for mutex)
fdb::Database fdbDb;
// FDB transaction
// Provides a thread safe interface by itself (no need for mutex)
fdb::Transaction fdbTx;
// Actor implementing the transaction workflow
// The function implementing the starting point of the transaction
// Set in constructor and reset on cleanup (no need for mutex)
std::shared_ptr<ITransactionActor> txActor;
TOpStartFct startFct;
// Mutex protecting access to shared mutable state
// Only the state that is accessible under the IN_PROGRESS state
@ -277,7 +310,7 @@ protected:
// Continuation to be called after completion of the transaction
// Set in contructor, stays immutable
const TTaskFct contAfterDone;
const TOpContFct contAfterDone;
// Reference to the scheduler
// Set in constructor, stays immutable
@ -319,6 +352,12 @@ protected:
// Indicates if the database error was injected
// Accessed on initialization and in ON_ERROR state only (no need for mutex)
bool databaseCreateErrorInjected;
// The tenant that we will run this transaction in
const std::optional<fdb::BytesRef> tenantName;
// Specifies whether the operation is transactional
const bool transactional;
};
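The constructor above now creates the transaction either directly from the database or through a tenant. A short sketch of that choice, not part of this commit, using the wrapper types from the fdb_api.hpp header extended elsewhere in this diff; the helper name is illustrative.

#include <optional>
#include "test/fdb_api.hpp" // the C++ wrapper shown later in this diff

fdb::Transaction makeTransaction(fdb::Database& db, std::optional<fdb::BytesRef> tenantName) {
    if (tenantName) {
        fdb::Tenant tenant = db.openTenant(*tenantName); // per-tenant transaction
        return tenant.createTransaction();
    }
    return db.createTransaction(); // tenant-less transaction on the whole keyspace
}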
/**
@ -327,12 +366,15 @@ protected:
class BlockingTransactionContext : public TransactionContextBase {
public:
BlockingTransactionContext(ITransactionExecutor* executor,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath)
: TransactionContextBase(executor, txActor, cont, scheduler, retryLimit, bgBasePath) {}
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: TransactionContextBase(executor, startFct, cont, scheduler, retryLimit, bgBasePath, tenantName, transactional) {
}
protected:
void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -402,12 +444,15 @@ protected:
class AsyncTransactionContext : public TransactionContextBase {
public:
AsyncTransactionContext(ITransactionExecutor* executor,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath)
: TransactionContextBase(executor, txActor, cont, scheduler, retryLimit, bgBasePath) {}
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: TransactionContextBase(executor, startFct, cont, scheduler, retryLimit, bgBasePath, tenantName, transactional) {
}
protected:
void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -415,7 +460,7 @@ protected:
if (txState != TxState::IN_PROGRESS) {
return;
}
callbackMap[f] = CallbackInfo{ f, cont, shared_from_this(), retryOnError, timeNow() };
callbackMap[f] = CallbackInfo{ f, cont, shared_from_this(), retryOnError, timeNow(), false };
lock.unlock();
try {
f.then([this](fdb::Future f) { futureReadyCallback(f, this); });
@ -462,7 +507,7 @@ protected:
err.code(),
err.what());
}
if (err.code() == error_code_transaction_cancelled) {
if (err.code() == error_code_transaction_cancelled || cbInfo.cancelled) {
return;
}
if (err.code() == error_code_success || !cbInfo.retryOnError) {
@ -518,17 +563,17 @@ protected:
scheduler->schedule([thisRef]() { thisRef->handleOnErrorResult(); });
}
void cleanUp() override {
TransactionContextBase::cleanUp();
void cancelPendingFutures() override {
// Cancel all pending operations
// Note that the callbacks of the cancelled futures will still be called
std::unique_lock<std::mutex> lock(mutex);
std::vector<fdb::Future> futures;
for (auto& iter : callbackMap) {
iter.second.cancelled = true;
futures.push_back(iter.second.future);
}
lock.unlock();
for (auto& f : futures) {
f.cancel();
}
@ -548,6 +593,7 @@ protected:
std::shared_ptr<ITransactionContext> thisRef;
bool retryOnError;
TimePoint startTime;
bool cancelled;
};
// Map for keeping track of future waits and holding necessary object references
@ -567,29 +613,86 @@ class TransactionExecutorBase : public ITransactionExecutor {
public:
TransactionExecutorBase(const TransactionExecutorOptions& options) : options(options), scheduler(nullptr) {}
~TransactionExecutorBase() {
if (tamperClusterFileThread.joinable()) {
tamperClusterFileThread.join();
}
}
void init(IScheduler* scheduler, const char* clusterFile, const std::string& bgBasePath) override {
this->scheduler = scheduler;
this->clusterFile = clusterFile;
this->bgBasePath = bgBasePath;
ASSERT(!options.tmpDir.empty());
emptyClusterFile.create(options.tmpDir, "fdbempty.cluster");
invalidClusterFile.create(options.tmpDir, "fdbinvalid.cluster");
invalidClusterFile.write(Random().get().randomStringLowerCase<std::string>(1, 100));
emptyListClusterFile.create(options.tmpDir, "fdbemptylist.cluster");
emptyListClusterFile.write(fmt::format("{}:{}@",
Random().get().randomStringLowerCase<std::string>(3, 8),
Random().get().randomStringLowerCase<std::string>(1, 100)));
if (options.tamperClusterFile) {
tamperedClusterFile.create(options.tmpDir, "fdb.cluster");
originalClusterFile = clusterFile;
this->clusterFile = tamperedClusterFile.getFileName();
// begin with a valid cluster file, but with a non-existent address
tamperedClusterFile.write(fmt::format("{}:{}@192.168.{}.{}:{}",
Random().get().randomStringLowerCase<std::string>(3, 8),
Random().get().randomStringLowerCase<std::string>(1, 100),
Random().get().randomInt(1, 254),
Random().get().randomInt(1, 254),
Random().get().randomInt(2000, 10000)));
tamperClusterFileThread = std::thread([this]() {
std::this_thread::sleep_for(std::chrono::seconds(2));
// now write an invalid connection string
tamperedClusterFile.write(fmt::format("{}:{}@",
Random().get().randomStringLowerCase<std::string>(3, 8),
Random().get().randomStringLowerCase<std::string>(1, 100)));
std::this_thread::sleep_for(std::chrono::seconds(2));
// finally use correct cluster file contents
std::filesystem::copy_file(std::filesystem::path(originalClusterFile),
std::filesystem::path(tamperedClusterFile.getFileName()),
std::filesystem::copy_options::overwrite_existing);
});
}
}
const TransactionExecutorOptions& getOptions() override { return options; }
void execute(std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) override {
void execute(TOpStartFct startFct,
TOpContFct cont,
std::optional<fdb::BytesRef> tenantName,
bool transactional) override {
try {
std::shared_ptr<ITransactionContext> ctx;
if (options.blockOnFutures) {
ctx = std::make_shared<BlockingTransactionContext>(
this, txActor, cont, scheduler, options.transactionRetryLimit, bgBasePath);
this, startFct, cont, scheduler, options.transactionRetryLimit, bgBasePath, tenantName, true);
} else {
ctx = std::make_shared<AsyncTransactionContext>(
this, txActor, cont, scheduler, options.transactionRetryLimit, bgBasePath);
this, startFct, cont, scheduler, options.transactionRetryLimit, bgBasePath, tenantName, true);
}
txActor->init(ctx);
txActor->start();
startFct(ctx);
} catch (...) {
txActor->complete(fdb::Error(error_code_operation_failed));
cont();
cont(fdb::Error(error_code_operation_failed));
}
}
std::string getClusterFileForErrorInjection() override {
switch (Random::get().randomInt(0, 3)) {
case 0:
return fmt::format("{}{}", "not-existing-file", Random::get().randomStringLowerCase<std::string>(0, 2));
case 1:
return emptyClusterFile.getFileName();
case 2:
return invalidClusterFile.getFileName();
default: // case 3
return emptyListClusterFile.getFileName();
}
}
@ -598,6 +701,12 @@ protected:
std::string bgBasePath;
std::string clusterFile;
IScheduler* scheduler;
TmpFile emptyClusterFile;
TmpFile invalidClusterFile;
TmpFile emptyListClusterFile;
TmpFile tamperedClusterFile;
std::thread tamperClusterFileThread;
std::string originalClusterFile;
};
/**
@ -612,7 +721,7 @@ public:
void init(IScheduler* scheduler, const char* clusterFile, const std::string& bgBasePath) override {
TransactionExecutorBase::init(scheduler, clusterFile, bgBasePath);
for (int i = 0; i < options.numDatabases; i++) {
fdb::Database db(clusterFile);
fdb::Database db(this->clusterFile);
databases.push_back(db);
}
}
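The tamperClusterFile mode above rewrites the cluster file while the test is running: first a well-formed file pointing at an unreachable address, then an invalid connection string, and finally the real contents. The following self-contained sketch of that timeline is illustrative only; the path and contents are made up.

#include <chrono>
#include <fstream>
#include <string>
#include <thread>

int main() {
    const std::string path = "/tmp/tampered-demo.cluster"; // hypothetical path
    auto writeFile = [&](const std::string& contents) {
        std::ofstream ofs(path, std::ios::binary | std::ios::trunc);
        ofs << contents;
    };
    writeFile("abc:def@192.168.1.1:4500"); // valid format, unreachable address
    std::thread tamper([&] {
        std::this_thread::sleep_for(std::chrono::seconds(2));
        writeFile("abc:def@"); // invalid connection string
        std::this_thread::sleep_for(std::chrono::seconds(2));
        writeFile("abc:def@127.0.0.1:4500"); // finally plausible real contents
    });
    // ... a workload would keep opening databases against `path` here ...
    tamper.join();
    return 0;
}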

View File

@ -38,6 +38,9 @@ class ITransactionContext : public std::enable_shared_from_this<ITransactionCont
public:
virtual ~ITransactionContext() {}
// Current FDB database
virtual fdb::Database db() = 0;
// Current FDB transaction
virtual fdb::Transaction tx() = 0;
@ -62,57 +65,11 @@ public:
virtual void continueAfterAll(std::vector<fdb::Future> futures, TTaskFct cont);
};
/**
* Interface of an actor object implementing a concrete transaction
*/
class ITransactionActor {
public:
virtual ~ITransactionActor() {}
// Type of the lambda functions implementing a database operation
using TOpStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
// Initialize with the given transaction context
virtual void init(std::shared_ptr<ITransactionContext> ctx) = 0;
// Start execution of the transaction, also called on retries
virtual void start() = 0;
// Transaction completion result (error_code_success in case of success)
virtual fdb::Error getError() = 0;
// Notification about the completion of the transaction
virtual void complete(fdb::Error err) = 0;
};
/**
* A helper base class for transaction actors
*/
class TransactionActorBase : public ITransactionActor {
public:
void init(std::shared_ptr<ITransactionContext> ctx) override { context = ctx; }
fdb::Error getError() override { return error; }
void complete(fdb::Error err) override;
protected:
std::shared_ptr<ITransactionContext> ctx() { return context; }
private:
std::shared_ptr<ITransactionContext> context;
fdb::Error error = fdb::Error::success();
};
// Type of the lambda functions implementing a transaction
using TTxStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
/**
* A wrapper class for transactions implemented by lambda functions
*/
class TransactionFct : public TransactionActorBase {
public:
TransactionFct(TTxStartFct startFct) : startFct(startFct) {}
void start() override { startFct(this->ctx()); }
private:
TTxStartFct startFct;
};
// Type of the lambda functions implementing a database operation
using TOpContFct = std::function<void(fdb::Error)>;
/**
* Configuration of transaction execution mode
@ -127,15 +84,24 @@ struct TransactionExecutorOptions {
// Enable injection of database create errors
bool injectDatabaseCreateErrors = false;
// Test tampering with the cluster file contents
bool tamperClusterFile = false;
// The probability of injected database create errors
// Used if buggify = true
// Used if injectDatabaseCreateErrors = true
double databaseCreateErrorRatio = 0.1;
// The size of the database instance pool
int numDatabases = 1;
// The number of tenants to create in the cluster. If 0, no tenants are used.
int numTenants = 0;
// Maximum number of retries per transaction (0 - unlimited)
int transactionRetryLimit = 0;
// Temporary directory
std::string tmpDir;
};
/**
@ -147,8 +113,12 @@ class ITransactionExecutor {
public:
virtual ~ITransactionExecutor() {}
virtual void init(IScheduler* sched, const char* clusterFile, const std::string& bgBasePath) = 0;
virtual void execute(std::shared_ptr<ITransactionActor> tx, TTaskFct cont) = 0;
virtual void execute(TOpStartFct start,
TOpContFct cont,
std::optional<fdb::BytesRef> tenantName,
bool transactional) = 0;
virtual fdb::Database selectDatabase() = 0;
virtual std::string getClusterFileForErrorInjection() = 0;
virtual const TransactionExecutorOptions& getOptions() = 0;
};

View File

@ -23,6 +23,9 @@
#include <algorithm>
#include <ctype.h>
#include <chrono>
#include <filesystem>
#include <fstream>
#include <string>
namespace FdbApiTester {
@ -46,16 +49,6 @@ Random& Random::get() {
return random;
}
fdb::ByteString Random::randomStringLowerCase(int minLength, int maxLength) {
int length = randomInt(minLength, maxLength);
fdb::ByteString str;
str.reserve(length);
for (int i = 0; i < length; i++) {
str += (char)randomInt('a', 'z');
}
return str;
}
bool Random::randomBool(double trueRatio) {
return std::uniform_real_distribution<double>(0.0, 1.0)(random) <= trueRatio;
}
@ -119,4 +112,39 @@ GranuleSummaryArray copyGranuleSummaryArray(fdb::future_var::GranuleSummaryRefAr
return out;
};
TmpFile::~TmpFile() {
if (!filename.empty()) {
remove();
}
}
void TmpFile::create(std::string_view dir, std::string_view prefix) {
while (true) {
filename = fmt::format("{}/{}-{}", dir, prefix, Random::get().randomStringLowerCase<std::string>(6, 6));
if (!std::filesystem::exists(std::filesystem::path(filename))) {
break;
}
}
// Create an empty tmp file
std::fstream tmpFile(filename, std::fstream::out);
if (!tmpFile.good()) {
throw TesterError(fmt::format("Failed to create temporary file {}\n", filename));
}
}
void TmpFile::write(std::string_view data) {
std::ofstream ofs(filename, std::fstream::out | std::fstream::binary);
if (!ofs.good()) {
throw TesterError(fmt::format("Failed to write to the temporary file {}\n", filename));
}
ofs.write(data.data(), data.size());
}
void TmpFile::remove() {
if (!std::filesystem::remove(std::filesystem::path(filename))) {
fmt::print(stderr, "Failed to remove file {}\n", filename);
}
}
} // namespace FdbApiTester
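Hypothetical usage of the TmpFile helper added above; the include path and file names are assumptions (in the repo this header lives with the apitester sources).

#include <iostream>
#include "TesterUtil.h" // assumed header name for the declarations shown in this diff

int main() {
    FdbApiTester::TmpFile clusterFile;
    clusterFile.create("/tmp", "fdbdemo.cluster"); // creates a uniquely named file under /tmp
    clusterFile.write("abc:def@127.0.0.1:4500");   // arbitrary contents
    std::cout << clusterFile.getFileName() << "\n";
    return 0;
} // the file is removed when clusterFile goes out of scope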

View File

@ -66,7 +66,20 @@ public:
int randomInt(int min, int max);
fdb::ByteString randomStringLowerCase(int minLength, int maxLength);
template <class StringType>
StringType randomStringLowerCase(int minLength, int maxLength) {
int length = randomInt(minLength, maxLength);
StringType str;
str.reserve(length);
for (int i = 0; i < length; i++) {
str += (char)randomInt('a', 'z');
}
return str;
}
fdb::ByteString randomByteStringLowerCase(int minLength, int maxLength) {
return randomStringLowerCase<fdb::ByteString>(minLength, maxLength);
}
bool randomBool(double trueRatio);
@ -142,6 +155,19 @@ static fdb::ByteString toByteString(T value) {
return output;
}
// Creates a temporary file; the file is deleted when the object is destroyed.
struct TmpFile {
public:
~TmpFile();
void create(std::string_view dir, std::string_view prefix);
void write(std::string_view data);
void remove();
const std::string& getFileName() const { return filename; }
private:
std::string filename;
};
} // namespace FdbApiTester
#endif
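A self-contained sketch, not from this commit, of the templated random-string helper declared above: the same loop can fill either a std::string (e.g. for cluster-file contents) or a byte string (e.g. for keys and values).

#include <cstdint>
#include <iostream>
#include <random>
#include <string>

using ByteString = std::basic_string<uint8_t>; // mirrors fdb::ByteString from fdb_api.hpp

template <class StringType>
StringType randomStringLowerCase(std::mt19937& rng, int minLength, int maxLength) {
    std::uniform_int_distribution<int> len(minLength, maxLength);
    std::uniform_int_distribution<int> ch('a', 'z');
    StringType str;
    int n = len(rng);
    str.reserve(n);
    for (int i = 0; i < n; i++) {
        str += (char)ch(rng);
    }
    return str;
}

int main() {
    std::mt19937 rng(42);
    std::string s = randomStringLowerCase<std::string>(rng, 3, 8);
    ByteString b = randomStringLowerCase<ByteString>(rng, 10, 100);
    std::cout << s << " (" << b.size() << " random bytes)\n";
    return 0;
}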

View File

@ -106,30 +106,49 @@ void WorkloadBase::schedule(TTaskFct task) {
});
}
void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskFct cont, bool failOnError) {
void WorkloadBase::execTransaction(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError) {
doExecute(startFct, cont, tenant, failOnError, true);
}
// Execute a non-transactional database operation within the workload
void WorkloadBase::execOperation(TOpStartFct startFct, TTaskFct cont, bool failOnError) {
doExecute(startFct, cont, {}, failOnError, false);
}
void WorkloadBase::doExecute(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError,
bool transactional) {
ASSERT(inProgress);
if (failed) {
return;
}
tasksScheduled++;
numTxStarted++;
manager->txExecutor->execute(tx, [this, tx, cont, failOnError]() {
numTxCompleted++;
fdb::Error err = tx->getError();
if (err.code() == error_code_success) {
cont();
} else {
std::string msg = fmt::format("Transaction failed with error: {} ({})", err.code(), err.what());
if (failOnError) {
error(msg);
failed = true;
} else {
info(msg);
cont();
}
}
scheduledTaskDone();
});
manager->txExecutor->execute(
startFct,
[this, startFct, cont, failOnError](fdb::Error err) {
numTxCompleted++;
if (err.code() == error_code_success) {
cont();
} else {
std::string msg = fmt::format("Transaction failed with error: {} ({})", err.code(), err.what());
if (failOnError) {
error(msg);
failed = true;
} else {
info(msg);
cont();
}
}
scheduledTaskDone();
},
tenant,
transactional);
}
void WorkloadBase::info(const std::string& msg) {

View File

@ -82,6 +82,9 @@ struct WorkloadConfig {
// Total number of clients
int numClients;
// Number of Tenants
int numTenants;
// Selected FDB API version
int apiVersion;
@ -116,12 +119,13 @@ protected:
void schedule(TTaskFct task);
// Execute a transaction within the workload
void execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskFct cont, bool failOnError = true);
void execTransaction(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant = std::optional<fdb::BytesRef>(),
bool failOnError = true);
// Execute a transaction within the workload, a convenience method for a transaction defined by a lambda function
void execTransaction(TTxStartFct start, TTaskFct cont, bool failOnError = true) {
execTransaction(std::make_shared<TransactionFct>(start), cont, failOnError);
}
// Execute a non-transactional database operation within the workload
void execOperation(TOpStartFct startFct, TTaskFct cont, bool failOnError = true);
// Log an error message, increase error counter
void error(const std::string& msg);
@ -135,6 +139,12 @@ protected:
private:
WorkloadManager* manager;
void doExecute(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError,
bool transactional);
// Decrease scheduled task counter, notify the workload manager
// that the task is done if no more tasks are scheduled
void scheduledTaskDone();

View File

@ -36,6 +36,8 @@ namespace FdbApiTester {
namespace {
#define API_VERSION_CLIENT_TMP_DIR 720
enum TesterOptionId {
OPT_CONNFILE,
OPT_HELP,
@ -285,7 +287,7 @@ void fdb_check(fdb::Error e) {
}
void applyNetworkOptions(TesterOptions& options) {
if (!options.tmpDir.empty() && options.apiVersion >= 720) {
if (!options.tmpDir.empty() && options.apiVersion >= API_VERSION_CLIENT_TMP_DIR) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_TMP_DIR, options.tmpDir);
}
if (!options.externalClientLibrary.empty()) {
@ -354,6 +356,12 @@ void randomizeOptions(TesterOptions& options) {
options.numClientThreads = random.randomInt(options.testSpec.minClientThreads, options.testSpec.maxClientThreads);
options.numDatabases = random.randomInt(options.testSpec.minDatabases, options.testSpec.maxDatabases);
options.numClients = random.randomInt(options.testSpec.minClients, options.testSpec.maxClients);
// Choose a random number of tenants. If a test is configured to allow 0 tenants, then use 0 tenants half the time.
if (options.testSpec.maxTenants >= options.testSpec.minTenants &&
(options.testSpec.minTenants > 0 || random.randomBool(0.5))) {
options.numTenants = random.randomInt(options.testSpec.minTenants, options.testSpec.maxTenants);
}
}
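A compact restatement, for illustration only, of the tenant-count choice made just above; the function name is made up.

#include <random>

// Mirrors the logic above: if the spec allows 0 tenants, run tenant-less half the time;
// -1 means "tenants not used" (the TesterOptions default).
int chooseNumTenants(int minTenants, int maxTenants, std::mt19937& rng) {
    std::bernoulli_distribution coin(0.5);
    if (maxTenants >= minTenants && (minTenants > 0 || coin(rng))) {
        return std::uniform_int_distribution<int>(minTenants, maxTenants)(rng);
    }
    return -1;
}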
bool runWorkloads(TesterOptions& options) {
@ -365,6 +373,9 @@ bool runWorkloads(TesterOptions& options) {
// 7.1 and older releases crash on database create errors
txExecOptions.injectDatabaseCreateErrors = options.testSpec.buggify && options.apiVersion > 710;
txExecOptions.transactionRetryLimit = options.transactionRetryLimit;
txExecOptions.tmpDir = options.tmpDir.empty() ? std::string("/tmp") : options.tmpDir;
txExecOptions.tamperClusterFile = options.testSpec.tamperClusterFile;
txExecOptions.numTenants = options.numTenants;
std::vector<std::shared_ptr<IWorkload>> workloads;
workloads.reserve(options.testSpec.workloads.size() * options.numClients);
@ -376,6 +387,7 @@ bool runWorkloads(TesterOptions& options) {
config.options = workloadSpec.options;
config.clientId = i;
config.numClients = options.numClients;
config.numTenants = options.numTenants;
config.apiVersion = options.apiVersion;
std::shared_ptr<IWorkload> workload = IWorkloadFactory::create(workloadSpec.name, config);
if (!workload) {

View File

@ -0,0 +1,21 @@
[[test]]
title = 'Multi-tenant API Correctness Multi Threaded'
multiThreaded = true
buggify = true
minFdbThreads = 2
maxFdbThreads = 8
minClients = 2
maxClients = 8
minTenants = 2
maxTenants = 5
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 5
initialSize = 100
numRandomOperations = 200
readExistingKeysRatio = 0.9

View File

@ -0,0 +1,24 @@
[[test]]
title = 'Test tampering the cluster file'
multiThreaded = true
buggify = true
tamperClusterFile = true
minFdbThreads = 2
maxFdbThreads = 4
minDatabases = 2
maxDatabases = 4
minClientThreads = 2
maxClientThreads = 4
minClients = 2
maxClients = 4
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9

View File

@ -44,7 +44,7 @@ int main(int argc, char** argv) {
if (argc != 2) {
printf("Usage: %s <cluster_file>", argv[0]);
}
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
fdb_check(fdb_setup_network());
std::thread network_thread{ &fdb_run_network };

View File

@ -46,6 +46,8 @@ namespace native {
#include <foundationdb/fdb_c.h>
}
#define TENANT_API_VERSION_GUARD 720
using ByteString = std::basic_string<uint8_t>;
using BytesRef = std::basic_string_view<uint8_t>;
using CharsRef = std::string_view;
@ -347,6 +349,7 @@ public:
class Future {
protected:
friend class Transaction;
friend class Database;
friend std::hash<Future>;
std::shared_ptr<native::FDBFuture> f;
@ -505,6 +508,14 @@ public:
Transaction(const Transaction&) noexcept = default;
Transaction& operator=(const Transaction&) noexcept = default;
void atomic_store(Transaction other) { std::atomic_store(&tr, other.tr); }
Transaction atomic_load() {
Transaction retVal;
retVal.tr = std::atomic_load(&tr);
return retVal;
}
bool valid() const noexcept { return tr != nullptr; }
explicit operator bool() const noexcept { return valid(); }
@ -708,6 +719,14 @@ public:
}
Database() noexcept : db(nullptr) {}
void atomic_store(Database other) { std::atomic_store(&db, other.db); }
Database atomic_load() {
Database retVal;
retVal.db = std::atomic_load(&db);
return retVal;
}
Error setOptionNothrow(FDBDatabaseOption option, int64_t value) noexcept {
return Error(native::fdb_database_set_option(
db.get(), option, reinterpret_cast<const uint8_t*>(&value), static_cast<int>(sizeof(value))));
@ -753,10 +772,17 @@ public:
throwError("Failed to create transaction: ", err);
return Transaction(tx_native);
}
TypedFuture<future_var::KeyRangeRefArray> listBlobbifiedRanges(KeyRef begin, KeyRef end, int rangeLimit) {
if (!db)
throw std::runtime_error("list_blobbified_ranges from null database");
return native::fdb_database_list_blobbified_ranges(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), rangeLimit);
}
};
inline Error selectApiVersionNothrow(int version) {
if (version < 720) {
if (version < TENANT_API_VERSION_GUARD) {
Tenant::tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
}
return Error(native::fdb_select_api_version(version));
@ -769,7 +795,7 @@ inline void selectApiVersion(int version) {
}
inline Error selectApiVersionCappedNothrow(int version) {
if (version < 720) {
if (version < TENANT_API_VERSION_GUARD) {
Tenant::tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
}
return Error(

View File

@ -4,6 +4,6 @@
int main(int argc, char* argv[]) {
(void)argc;
(void)argv;
fdb_select_api_version(720);
fdb_select_api_version(FDB_API_VERSION);
return 0;
}

View File

@ -641,7 +641,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(720), "select API version", rs);
checkError(fdb_select_api_version(FDB_API_VERSION), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
valueStr = (uint8_t*)malloc((sizeof(uint8_t)) * valueSize);

View File

@ -285,7 +285,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(720), "select API version", rs);
checkError(fdb_select_api_version(FDB_API_VERSION), "select API version", rs);
printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, keySize);

View File

@ -97,7 +97,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(720), "select API version", rs);
checkError(fdb_select_api_version(FDB_API_VERSION), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, KEY_SIZE);

View File

@ -255,7 +255,7 @@ int main(int argc, char** argv) {
<< std::endl;
return 1;
}
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
if (argc >= 3) {
std::string externalClientLibrary = argv[2];
if (externalClientLibrary.substr(0, 2) != "--") {

View File

@ -42,13 +42,13 @@ TEST_CASE("setup") {
CHECK(err);
// Select current API version
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
// Error to call again after a successful return
err = fdb_select_api_version(720);
err = fdb_select_api_version(FDB_API_VERSION);
CHECK(err);
CHECK(fdb_get_max_api_version() >= 720);
CHECK(fdb_get_max_api_version() >= FDB_API_VERSION);
fdb_check(fdb_setup_network());
// Calling a second time should fail

View File

@ -53,7 +53,7 @@ bool file_exists(const char* path) {
}
int main(int argc, char** argv) {
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
std::string file_identifier = "trace_partial_file_suffix_test" + std::to_string(std::random_device{}());
std::string trace_partial_file_suffix = ".tmp";

View File

@ -2979,7 +2979,7 @@ int main(int argc, char** argv) {
<< std::endl;
return 1;
}
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
if (argc >= 4) {
std::string externalClientLibrary = argv[3];
if (externalClientLibrary.substr(0, 2) != "--") {

View File

@ -266,7 +266,7 @@ struct SimpleWorkload final : FDBWorkload {
insertsPerTx = context->getOption("insertsPerTx", 100ul);
opsPerTx = context->getOption("opsPerTx", 100ul);
runFor = context->getOption("runFor", 10.0);
auto err = fdb_select_api_version(720);
auto err = fdb_select_api_version(FDB_API_VERSION);
if (err) {
context->trace(
FDBSeverity::Info, "SelectAPIVersionFailed", { { "Error", std::string(fdb_get_error(err)) } });

View File

@ -23,17 +23,17 @@
namespace FDB {
const uint8_t DirectoryLayer::LITTLE_ENDIAN_LONG_ONE[8] = { 1, 0, 0, 0, 0, 0, 0, 0 };
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = LiteralStringRef("hca");
const StringRef DirectoryLayer::LAYER_KEY = LiteralStringRef("layer");
const StringRef DirectoryLayer::VERSION_KEY = LiteralStringRef("version");
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = "hca"_sr;
const StringRef DirectoryLayer::LAYER_KEY = "layer"_sr;
const StringRef DirectoryLayer::VERSION_KEY = "version"_sr;
const int64_t DirectoryLayer::SUB_DIR_KEY = 0;
const uint32_t DirectoryLayer::VERSION[3] = { 1, 0, 0 };
const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = LiteralStringRef("\xfe");
const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = "\xfe"_sr;
const Subspace DirectoryLayer::DEFAULT_NODE_SUBSPACE = Subspace(DEFAULT_NODE_SUBSPACE_PREFIX);
const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");
const StringRef DirectoryLayer::PARTITION_LAYER = "partition"_sr;
DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes)
: rootNode(nodeSubspace.get(nodeSubspace.key())), nodeSubspace(nodeSubspace), contentSubspace(contentSubspace),

View File

@ -31,7 +31,7 @@ typedef Standalone<KeyRef> Key;
typedef Standalone<ValueRef> Value;
inline Key keyAfter(const KeyRef& key) {
if (key == LiteralStringRef("\xff\xff"))
if (key == "\xff\xff"_sr)
return key;
Standalone<StringRef> r;
@ -43,7 +43,7 @@ inline Key keyAfter(const KeyRef& key) {
}
inline KeyRef keyAfter(const KeyRef& key, Arena& arena) {
if (key == LiteralStringRef("\xff\xff"))
if (key == "\xff\xff"_sr)
return key;
uint8_t* t = new (arena) uint8_t[key.size() + 1];
memcpy(t, key.begin(), key.size());

View File

@ -38,7 +38,7 @@ THREAD_FUNC networkThread(void* fdb) {
}
ACTOR Future<Void> _test() {
API* fdb = FDB::API::selectAPIVersion(720);
API* fdb = FDB::API::selectAPIVersion(FDB_API_VERSION);
auto db = fdb->createDatabase();
state Reference<Transaction> tr = db->createTransaction();
@ -63,15 +63,14 @@ ACTOR Future<Void> _test() {
// wait( waitForAllReady( versions ) );
printf("Elapsed: %lf\n", timer_monotonic() - starttime);
tr->set(LiteralStringRef("foo"), LiteralStringRef("bar"));
tr->set("foo"_sr, "bar"_sr);
Optional<FDBStandalone<ValueRef>> v = wait(tr->get(LiteralStringRef("foo")));
Optional<FDBStandalone<ValueRef>> v = wait(tr->get("foo"_sr));
if (v.present()) {
printf("%s\n", v.get().toString().c_str());
}
FDBStandalone<RangeResultRef> r =
wait(tr->getRange(KeyRangeRef(LiteralStringRef("a"), LiteralStringRef("z")), 100));
FDBStandalone<RangeResultRef> r = wait(tr->getRange(KeyRangeRef("a"_sr, "z"_sr), 100));
for (auto kv : r) {
printf("%s is %s\n", kv.key.toString().c_str(), kv.value.toString().c_str());
@ -82,7 +81,7 @@ ACTOR Future<Void> _test() {
}
void fdb_flow_test() {
API* fdb = FDB::API::selectAPIVersion(720);
API* fdb = FDB::API::selectAPIVersion(FDB_API_VERSION);
fdb->setupNetwork();
startThread(networkThread, fdb);

View File

@ -545,11 +545,10 @@ struct DirectoryLogDirectoryFunc : InstructionFunc {
pathTuple.append(p, true);
}
instruction->tr->set(logSubspace.pack(LiteralStringRef("path"), true), pathTuple.pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("layer"), true),
Tuple().append(directory->getLayer()).pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("exists"), true), Tuple().append(exists ? 1 : 0).pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("children"), true), childrenTuple.pack());
instruction->tr->set(logSubspace.pack("path"_sr, true), pathTuple.pack());
instruction->tr->set(logSubspace.pack("layer"_sr, true), Tuple().append(directory->getLayer()).pack());
instruction->tr->set(logSubspace.pack("exists"_sr, true), Tuple().append(exists ? 1 : 0).pack());
instruction->tr->set(logSubspace.pack("children"_sr, true), childrenTuple.pack());
return Void();
}

View File

@ -470,12 +470,12 @@ ACTOR Future<Standalone<StringRef>> waitForVoid(Future<Void> f) {
try {
wait(f);
Tuple t;
t.append(LiteralStringRef("RESULT_NOT_PRESENT"));
t.append("RESULT_NOT_PRESENT"_sr);
return t.pack();
} catch (Error& e) {
// printf("FDBError1:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -493,7 +493,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<FDBStandalone<KeyRef>> f
} catch (Error& e) {
// printf("FDBError2:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -509,7 +509,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<Optional<FDBStandalone<V
if (value.present())
str = value.get();
else
str = LiteralStringRef("RESULT_NOT_PRESENT");
str = "RESULT_NOT_PRESENT"_sr;
Tuple t;
t.append(str);
@ -517,7 +517,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<Optional<FDBStandalone<V
} catch (Error& e) {
// printf("FDBError3:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -543,7 +543,7 @@ ACTOR Future<Standalone<StringRef>> getKey(Future<FDBStandalone<KeyRef>> f, Stan
} catch (Error& e) {
// printf("FDBError4:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -670,7 +670,7 @@ struct GetEstimatedRangeSize : InstructionFunc {
state Standalone<StringRef> endKey = Tuple::unpack(s2).getString(0);
Future<int64_t> fsize = instruction->tr->getEstimatedRangeSizeBytes(KeyRangeRef(beginKey, endKey));
int64_t size = wait(fsize);
data->stack.pushTuple(LiteralStringRef("GOT_ESTIMATED_RANGE_SIZE"));
data->stack.pushTuple("GOT_ESTIMATED_RANGE_SIZE"_sr);
return Void();
}
@ -698,7 +698,7 @@ struct GetRangeSplitPoints : InstructionFunc {
Future<FDBStandalone<VectorRef<KeyRef>>> fsplitPoints =
instruction->tr->getRangeSplitPoints(KeyRangeRef(beginKey, endKey), chunkSize);
FDBStandalone<VectorRef<KeyRef>> splitPoints = wait(fsplitPoints);
data->stack.pushTuple(LiteralStringRef("GOT_RANGE_SPLIT_POINTS"));
data->stack.pushTuple("GOT_RANGE_SPLIT_POINTS"_sr);
return Void();
}
@ -743,7 +743,7 @@ struct GetReadVersionFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
Version v = wait(instruction->tr->getReadVersion());
data->lastVersion = v;
data->stack.pushTuple(LiteralStringRef("GOT_READ_VERSION"));
data->stack.pushTuple("GOT_READ_VERSION"_sr);
return Void();
}
};
@ -767,7 +767,7 @@ struct GetCommittedVersionFunc : InstructionFunc {
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
data->lastVersion = instruction->tr->getCommittedVersion();
data->stack.pushTuple(LiteralStringRef("GOT_COMMITTED_VERSION"));
data->stack.pushTuple("GOT_COMMITTED_VERSION"_sr);
return Void();
}
};
@ -781,7 +781,7 @@ struct GetApproximateSizeFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
int64_t _ = wait(instruction->tr->getApproximateSize());
(void)_; // disable unused variable warning
data->stack.pushTuple(LiteralStringRef("GOT_APPROXIMATE_SIZE"));
data->stack.pushTuple("GOT_APPROXIMATE_SIZE"_sr);
return Void();
}
};
@ -1485,7 +1485,7 @@ struct ReadConflictKeyFunc : InstructionFunc {
// printf("=========READ_CONFLICT_KEY:%s\n", printable(key).c_str());
instruction->tr->addReadConflictKey(key);
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_KEY"));
data->stack.pushTuple("SET_CONFLICT_KEY"_sr);
return Void();
}
};
@ -1506,7 +1506,7 @@ struct WriteConflictKeyFunc : InstructionFunc {
// printf("=========WRITE_CONFLICT_KEY:%s\n", printable(key).c_str());
instruction->tr->addWriteConflictKey(key);
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_KEY"));
data->stack.pushTuple("SET_CONFLICT_KEY"_sr);
return Void();
}
};
@ -1529,7 +1529,7 @@ struct ReadConflictRangeFunc : InstructionFunc {
// printf("=========READ_CONFLICT_RANGE:%s:%s\n", printable(begin).c_str(), printable(end).c_str());
instruction->tr->addReadConflictRange(KeyRange(KeyRangeRef(begin, end)));
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_RANGE"));
data->stack.pushTuple("SET_CONFLICT_RANGE"_sr);
return Void();
}
};
@ -1553,7 +1553,7 @@ struct WriteConflictRangeFunc : InstructionFunc {
// printf("=========WRITE_CONFLICT_RANGE:%s:%s\n", printable(begin).c_str(), printable(end).c_str());
instruction->tr->addWriteConflictRange(KeyRange(KeyRangeRef(begin, end)));
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_RANGE"));
data->stack.pushTuple("SET_CONFLICT_RANGE"_sr);
return Void();
}
};
@ -1643,10 +1643,8 @@ struct UnitTestsFunc : InstructionFunc {
Optional<StringRef>(StringRef((const uint8_t*)&locationCacheSize, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MAX_WATCHES,
Optional<StringRef>(StringRef((const uint8_t*)&maxWatches, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_DATACENTER_ID,
Optional<StringRef>(LiteralStringRef("dc_id")));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MACHINE_ID,
Optional<StringRef>(LiteralStringRef("machine_id")));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_DATACENTER_ID, Optional<StringRef>("dc_id"_sr));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MACHINE_ID, Optional<StringRef>("machine_id"_sr));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_SNAPSHOT_RYW_ENABLE);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_LOGGING_MAX_FIELD_LENGTH,
@ -1685,13 +1683,13 @@ struct UnitTestsFunc : InstructionFunc {
Optional<StringRef>(StringRef((const uint8_t*)&maxRetryDelay, 8)));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_USED_DURING_COMMIT_PROTECTION_DISABLE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_TRANSACTION_LOGGING_ENABLE,
Optional<StringRef>(LiteralStringRef("my_transaction")));
Optional<StringRef>("my_transaction"_sr));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_READ_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_INCLUDE_PORT_IN_ADDRESS);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_REPORT_CONFLICTING_KEYS);
Optional<FDBStandalone<ValueRef>> _ = wait(tr->get(LiteralStringRef("\xff")));
Optional<FDBStandalone<ValueRef>> _ = wait(tr->get("\xff"_sr));
tr->cancel();
return Void();
@ -1724,13 +1722,13 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
Tuple opTuple = Tuple::unpack(data->instructions[idx].value);
state Standalone<StringRef> op = opTuple.getString(0);
state bool isDatabase = op.endsWith(LiteralStringRef("_DATABASE"));
state bool isSnapshot = op.endsWith(LiteralStringRef("_SNAPSHOT"));
state bool isDirectory = op.startsWith(LiteralStringRef("DIRECTORY_"));
state bool isDatabase = op.endsWith("_DATABASE"_sr);
state bool isSnapshot = op.endsWith("_SNAPSHOT"_sr);
state bool isDirectory = op.startsWith("DIRECTORY_"_sr);
try {
if (LOG_INSTRUCTIONS) {
if (op != LiteralStringRef("SWAP") && op != LiteralStringRef("PUSH")) {
if (op != "SWAP"_sr && op != "PUSH"_sr) {
printf("%zu. %s\n", idx, tupleToString(opTuple).c_str());
fflush(stdout);
}
@ -1773,7 +1771,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
if (opsThatCreateDirectories.count(op.toString())) {
data->directoryData.directoryList.push_back(DirectoryOrSubspace());
}
data->stack.pushTuple(LiteralStringRef("DIRECTORY_ERROR"));
data->stack.pushTuple("DIRECTORY_ERROR"_sr);
} else {
data->stack.pushError(e.code());
}
@ -1873,7 +1871,7 @@ ACTOR void _test_versionstamp() {
try {
g_network = newNet2(TLSConfig());
API* fdb = FDB::API::selectAPIVersion(720);
API* fdb = FDB::API::selectAPIVersion(FDB_API_VERSION);
fdb->setupNetwork();
startThread(networkThread, fdb);
@ -1883,15 +1881,14 @@ ACTOR void _test_versionstamp() {
state Future<FDBStandalone<StringRef>> ftrVersion = tr->getVersionstamp();
tr->atomicOp(LiteralStringRef("foo"),
LiteralStringRef("blahblahbl\x00\x00\x00\x00"),
FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
tr->atomicOp(
"foo"_sr, "blahblahbl\x00\x00\x00\x00"_sr, FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
wait(tr->commit()); // should use retry loop
tr->reset();
Optional<FDBStandalone<StringRef>> optionalDbVersion = wait(tr->get(LiteralStringRef("foo")));
Optional<FDBStandalone<StringRef>> optionalDbVersion = wait(tr->get("foo"_sr));
state FDBStandalone<StringRef> dbVersion = optionalDbVersion.get();
FDBStandalone<StringRef> trVersion = wait(ftrVersion);

View File

@ -71,7 +71,7 @@ struct FlowTesterStack {
void pushError(int errorCode) {
FDB::Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", errorCode));
// pack above as error string into another tuple
pushTuple(t.pack().toString());

View File

@ -128,7 +128,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 720 {
if version < 200 || version > headerVersion {
return errAPIVersionNotSupported
}

View File

@ -29,10 +29,12 @@ import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
const API_VERSION int = 720
func ExampleOpenDefault() {
var e error
e = fdb.APIVersion(720)
e = fdb.APIVersion(API_VERSION)
if e != nil {
fmt.Printf("Unable to set API version: %v\n", e)
return
@ -52,7 +54,7 @@ func ExampleOpenDefault() {
}
func TestVersionstamp(t *testing.T) {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
@ -98,7 +100,7 @@ func TestVersionstamp(t *testing.T) {
}
func TestReadTransactionOptions(t *testing.T) {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
_, e := db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
rtr.Options().SetAccessSystemKeys()
@ -110,7 +112,7 @@ func TestReadTransactionOptions(t *testing.T) {
}
func ExampleTransactor() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {
@ -161,7 +163,7 @@ func ExampleTransactor() {
}
func ExampleReadTransactor() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
@ -214,7 +216,7 @@ func ExampleReadTransactor() {
}
func ExamplePrefixRange() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()
@ -253,7 +255,7 @@ func ExamplePrefixRange() {
}
func ExampleRangeIterator() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()

View File

@ -379,7 +379,7 @@ struct JVM {
jmethodID selectMethod =
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
checkException();
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(720));
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(FDB_API_VERSION));
checkException();
env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
checkException();

View File

@ -40,6 +40,8 @@ import org.junit.jupiter.api.Assertions;
* This test is to verify the atomicity of transactions.
*/
public class CycleMultiClientIntegrationTest {
public static final int API_VERSION = 720;
public static final MultiClientHelper clientHelper = new MultiClientHelper();
// more write txn than validate txn, as parent thread waits only for validate txn.
@ -51,7 +53,7 @@ public class CycleMultiClientIntegrationTest {
private static List<String> expected = new ArrayList<>(Arrays.asList("0", "1", "2", "3"));
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
System.out.println("Starting tests");

View File

@ -40,7 +40,8 @@ import org.junit.jupiter.api.extension.ExtendWith;
*/
@ExtendWith(RequiresDatabase.class)
class DirectoryTest {
private static final FDB fdb = FDB.selectAPIVersion(720);
public static final int API_VERSION = 720;
private static final FDB fdb = FDB.selectAPIVersion(API_VERSION);
@Test
void testCanCreateDirectory() throws Exception {

View File

@ -41,7 +41,8 @@ import org.junit.jupiter.api.extension.ExtendWith;
@ExtendWith(RequiresDatabase.class)
class MappedRangeQueryIntegrationTest {
private static final FDB fdb = FDB.selectAPIVersion(720);
public static final int API_VERSION = 720;
private static final FDB fdb = FDB.selectAPIVersion(API_VERSION);
public String databaseArg = null;
private Database openFDB() { return fdb.open(databaseArg); }
@ -110,7 +111,7 @@ class MappedRangeQueryIntegrationTest {
boolean validate = true;
@Test
void comparePerformance() {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try (Database db = openFDB()) {
insertRecordsWithIndexes(numRecords, db);
instrument(rangeQueryAndThenRangeQueries, "rangeQueryAndThenRangeQueries", db);

View File

@ -41,7 +41,8 @@ import org.junit.jupiter.api.extension.ExtendWith;
*/
@ExtendWith(RequiresDatabase.class)
class RangeQueryIntegrationTest {
private static final FDB fdb = FDB.selectAPIVersion(720);
public static final int API_VERSION = 720;
private static final FDB fdb = FDB.selectAPIVersion(API_VERSION);
@BeforeEach
@AfterEach

View File

@ -41,6 +41,8 @@ import org.junit.jupiter.api.Assertions;
* are still seeing the initialValue even after new transactions set them to a new value.
*/
public class RepeatableReadMultiThreadClientTest {
public static final int API_VERSION = 720;
public static final MultiClientHelper clientHelper = new MultiClientHelper();
private static final int oldValueReadCount = 30;
@ -52,7 +54,7 @@ public class RepeatableReadMultiThreadClientTest {
private static final Map<Thread, OldValueReader> threadToOldValueReaders = new HashMap<>();
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
System.out.println("Starting tests");

View File

@ -47,6 +47,7 @@ import org.opentest4j.TestAbortedException;
* be running a server and you don't want to deal with spurious test failures.
*/
public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {
public static final int API_VERSION = 720;
public static boolean canRunIntegrationTest() {
String prop = System.getProperty("run.integration.tests");
@ -80,7 +81,7 @@ public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {
* assume that if we are here, then canRunIntegrationTest() is returning true and we don't have to bother
* checking it.
*/
try (Database db = FDB.selectAPIVersion(720).open()) {
try (Database db = FDB.selectAPIVersion(API_VERSION).open()) {
db.run(tr -> {
CompletableFuture<byte[]> future = tr.get("test".getBytes());

View File

@ -19,6 +19,8 @@ import org.junit.jupiter.api.Assertions;
* This test is to verify the causal consistency of transactions for a multi-threaded client.
*/
public class SidebandMultiThreadClientTest {
public static final int API_VERSION = 720;
public static final MultiClientHelper clientHelper = new MultiClientHelper();
private static final Map<Database, BlockingQueue<String>> db2Queues = new HashMap<>();
@ -26,7 +28,7 @@ public class SidebandMultiThreadClientTest {
private static final int txnCnt = 1000;
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
for (Database db : dbs) {

View File

@ -29,6 +29,8 @@ import org.junit.jupiter.api.extension.ExtensionContext;
* are not available for any reason.
*/
public class FDBLibraryRule implements BeforeAllCallback {
public static final int CURRENT_API_VERSION = 720;
private final int apiVersion;
// because FDB is a singleton (currently), this isn't a super-useful cache,
@ -37,7 +39,7 @@ public class FDBLibraryRule implements BeforeAllCallback {
public FDBLibraryRule(int apiVersion) { this.apiVersion = apiVersion; }
public static FDBLibraryRule current() { return new FDBLibraryRule(720); }
public static FDBLibraryRule current() { return new FDBLibraryRule(CURRENT_API_VERSION); }
public static FDBLibraryRule v63() { return new FDBLibraryRule(630); }

View File

@ -52,10 +52,6 @@ public class TenantManagement {
* @param tenantName The name of the tenant. Can be any byte string that does not begin with a 0xFF byte.
*/
public static void createTenant(Transaction tr, byte[] tenantName) {
if (FDB.instance().getAPIVersion() < 720) {
tr.options().setSpecialKeySpaceRelaxed();
}
tr.options().setSpecialKeySpaceEnableWrites();
tr.set(ByteArrayUtil.join(TENANT_MAP_PREFIX, tenantName), new byte[0]);
}
@ -90,7 +86,6 @@ public class TenantManagement {
final AtomicBoolean checkedExistence = new AtomicBoolean(false);
final byte[] key = ByteArrayUtil.join(TENANT_MAP_PREFIX, tenantName);
return db.runAsync(tr -> {
tr.options().setSpecialKeySpaceRelaxed();
tr.options().setSpecialKeySpaceEnableWrites();
if(checkedExistence.get()) {
tr.set(key, new byte[0]);
@ -138,10 +133,6 @@ public class TenantManagement {
* @param tenantName The name of the tenant being deleted.
*/
public static void deleteTenant(Transaction tr, byte[] tenantName) {
if (FDB.instance().getAPIVersion() < 720) {
tr.options().setSpecialKeySpaceRelaxed();
}
tr.options().setSpecialKeySpaceEnableWrites();
tr.clear(ByteArrayUtil.join(TENANT_MAP_PREFIX, tenantName));
}
@ -182,7 +173,6 @@ public class TenantManagement {
final AtomicBoolean checkedExistence = new AtomicBoolean(false);
final byte[] key = ByteArrayUtil.join(TENANT_MAP_PREFIX, tenantName);
return db.runAsync(tr -> {
tr.options().setSpecialKeySpaceRelaxed();
tr.options().setSpecialKeySpaceEnableWrites();
if(checkedExistence.get()) {
tr.clear(key);
@ -248,12 +238,7 @@ public class TenantManagement {
* and the value is the unprocessed JSON string containing the tenant's metadata
*/
public static CloseableAsyncIterator<KeyValue> listTenants(Database db, Tuple begin, Tuple end, int limit) {
Transaction tr = db.createTransaction();
if (FDB.instance().getAPIVersion() < 720) {
tr.options().setSpecialKeySpaceRelaxed();
}
return listTenants_internal(tr, begin.pack(), end.pack(), limit);
return listTenants_internal(db.createTransaction(), begin.pack(), end.pack(), limit);
}
private static CloseableAsyncIterator<KeyValue> listTenants_internal(Transaction tr, byte[] begin, byte[] end,

View File

@ -28,8 +28,10 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static final int apiVersion = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(apiVersion);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -29,11 +29,13 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.Transaction;
public class BlockingBenchmark {
public static final int API_VERSION = 720;
private static final int REPS = 100000;
private static final int PARALLEL = 100;
public static void main(String[] args) throws InterruptedException {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
// The cluster file DOES NOT need to be valid, although it must exist.
// This is because the database is never really contacted in this test.

View File

@ -30,6 +30,8 @@ import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
public class ConcurrentGetSetGet {
public static final int API_VERSION = 720;
public static final Charset UTF8 = Charset.forName("UTF-8");
final Semaphore semaphore = new Semaphore(CONCURRENCY);
@ -48,7 +50,7 @@ public class ConcurrentGetSetGet {
}
public static void main(String[] args) {
try(Database database = FDB.selectAPIVersion(720).open()) {
try(Database database = FDB.selectAPIVersion(API_VERSION).open()) {
new ConcurrentGetSetGet().apply(database);
}
}

View File

@ -25,8 +25,10 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -28,10 +28,12 @@ import com.apple.foundationdb.KeyValue;
import com.apple.foundationdb.TransactionContext;
public class IterableTest {
public static final int API_VERSION = 720;
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -32,9 +32,10 @@ import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;
public class LocalityTests {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database database = fdb.open(args[0])) {
try(Transaction tr = database.createTransaction()) {
String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();

View File

@ -36,6 +36,8 @@ import com.apple.foundationdb.async.AsyncIterator;
import com.apple.foundationdb.tuple.ByteArrayUtil;
public class ParallelRandomScan {
public static final int API_VERSION = 720;
private static final int ROWS = 1000000;
private static final int DURATION_MS = 2000;
private static final int PARALLELISM_MIN = 10;
@ -43,7 +45,7 @@ public class ParallelRandomScan {
private static final int PARALLELISM_STEP = 5;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(720);
FDB api = FDB.selectAPIVersion(API_VERSION);
try(Database database = api.open(args[0])) {
for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) {
runTest(database, i, ROWS, DURATION_MS);

View File

@ -29,12 +29,14 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.Transaction;
public class SerialInsertion {
public static final int API_VERSION = 720;
private static final int THREAD_COUNT = 10;
private static final int BATCH_SIZE = 1000;
private static final int NODES = 1000000;
public static void main(String[] args) {
FDB api = FDB.selectAPIVersion(720);
FDB api = FDB.selectAPIVersion(API_VERSION);
try(Database database = api.open()) {
long start = System.currentTimeMillis();

View File

@ -34,12 +34,14 @@ import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterable;
public class SerialIteration {
public static final int API_VERSION = 720;
private static final int ROWS = 1000000;
private static final int RUNS = 25;
private static final int THREAD_COUNT = 1;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(720);
FDB api = FDB.selectAPIVersion(API_VERSION);
try(Database database = api.open(args[0])) {
for(int i = 1; i <= THREAD_COUNT; i++) {
runThreadedTest(database, i);

View File

@ -27,10 +27,12 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.TransactionContext;
public class SerialTest {
public static final int API_VERSION = 720;
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -35,11 +35,13 @@ import com.apple.foundationdb.tuple.Tuple;
* Some tests regarding conflict ranges to make sure they do what we expect.
*/
public class SnapshotTransactionTest {
public static final int API_VERSION = 720;
private static final int CONFLICT_CODE = 1020;
private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
snapshotReadShouldNotConflict(db);
snapshotShouldNotAddConflictRange(db);

View File

@ -32,12 +32,14 @@ import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;
public class TupleTest {
public static final int API_VERSION = 720;
private static final byte FF = (byte)0xff;
public static void main(String[] args) throws NoSuchFieldException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -31,8 +31,10 @@ import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;
public class VersionstampSmokeTest {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
db.run(tr -> {
tr.clear(Tuple.from("prefix").range());

View File

@ -32,9 +32,10 @@ import com.apple.foundationdb.FDBException;
import com.apple.foundationdb.Transaction;
public class WatchTest {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database database = fdb.open(args[0])) {
database.options().setLocationCacheSize(42);
try(Transaction tr = database.createTransaction()) {

View File

@ -23,7 +23,6 @@
"""Documentation for this API can be found at
https://apple.github.io/foundationdb/api-python.html"""
import fdb
from fdb import impl as _impl
_tenant_map_prefix = b'\xff\xff/management/tenant/map/'
@ -53,9 +52,6 @@ def _check_tenant_existence(tr, key, existence_check_marker, force_maybe_commite
# If the existence_check_marker is a non-empty list, then the existence check is skipped.
@_impl.transactional
def _create_tenant_impl(tr, tenant_name, existence_check_marker, force_existence_check_maybe_committed=False):
if fdb._version < 720:
tr.options.set_special_key_space_relaxed()
tr.options.set_special_key_space_enable_writes()
key = b'%s%s' % (_tenant_map_prefix, tenant_name)
@ -74,9 +70,6 @@ def _create_tenant_impl(tr, tenant_name, existence_check_marker, force_existence
# If the existence_check_marker is a non-empty list, then the existence check is skipped.
@_impl.transactional
def _delete_tenant_impl(tr, tenant_name, existence_check_marker, force_existence_check_maybe_committed=False):
if fdb._version < 720:
tr.options.set_special_key_space_relaxed()
tr.options.set_special_key_space_enable_writes()
key = b'%s%s' % (_tenant_map_prefix, tenant_name)
@ -110,9 +103,6 @@ class FDBTenantList(object):
# JSON strings of the tenant metadata
@_impl.transactional
def _list_tenants_impl(tr, begin, end, limit):
if fdb._version < 720:
tr.options.set_special_key_space_relaxed()
tr.options.set_raw_access()
begin_key = b'%s%s' % (_tenant_map_prefix, begin)
end_key = b'%s%s' % (_tenant_map_prefix, end)

View File

@ -36,7 +36,7 @@ function(compile_boost)
set(B2_COMMAND "./b2")
set(BOOST_COMPILER_FLAGS -fvisibility=hidden -fPIC -std=c++17 -w)
set(BOOST_LINK_FLAGS "")
if(APPLE OR CLANG OR ICX OR USE_LIBCXX)
if(APPLE OR ICX OR USE_LIBCXX)
list(APPEND BOOST_COMPILER_FLAGS -stdlib=libc++ -nostdlib++)
list(APPEND BOOST_LINK_FLAGS -lc++ -lc++abi)
if (NOT APPLE)
@ -133,7 +133,7 @@ if(WIN32)
return()
endif()
find_package(Boost 1.78.0 EXACT QUIET COMPONENTS context filesystem CONFIG PATHS ${BOOST_HINT_PATHS})
find_package(Boost 1.78.0 EXACT QUIET COMPONENTS context filesystem iostreams CONFIG PATHS ${BOOST_HINT_PATHS})
set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost")
if(Boost_FOUND AND Boost_filesystem_FOUND AND Boost_context_FOUND AND Boost_iostreams_FOUND AND NOT FORCE_BOOST_BUILD)

View File

@ -12,6 +12,7 @@ if (RocksDB_FOUND)
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DFAIL_ON_WARNINGS=OFF
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
@ -43,6 +44,7 @@ else()
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DFAIL_ON_WARNINGS=OFF
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF

View File

@ -290,6 +290,10 @@ else()
add_link_options(-stdlib=libc++ -Wl,-build-id=sha1)
endif()
endif()
if (NOT APPLE AND NOT USE_LIBCXX)
message(STATUS "Linking libatomic")
add_link_options(-latomic)
endif()
if (OPEN_FOR_IDE)
add_compile_options(
-Wno-unknown-attributes)
@ -307,11 +311,19 @@ else()
-Wno-unknown-warning-option
-Wno-unused-parameter
-Wno-constant-logical-operand
# These need to be disabled for FDB's RocksDB storage server implementation
-Wno-deprecated-copy
-Wno-delete-non-abstract-non-virtual-dtor
-Wno-range-loop-construct
-Wno-reorder-ctor
# Needed for clang 13 (todo: Update above logic so that it figures out when to pass in -static-libstdc++ and when it will be ignored)
# When you remove this, you might need to move it back to the USE_CCACHE stanza. It was (only) there before I moved it here.
-Wno-unused-command-line-argument
)
if (USE_CCACHE)
add_compile_options(
-Wno-register
-Wno-unused-command-line-argument)
)
endif()
if (PROFILE_INSTR_GENERATE)
add_compile_options(-fprofile-instr-generate)

View File

@ -178,7 +178,7 @@ set(PORTABLE_ROCKSDB ON CACHE BOOL "Compile RocksDB in portable mode") # Set thi
set(WITH_LIBURING OFF CACHE BOOL "Build with liburing enabled") # Set this to ON to include liburing
# RocksDB is currently enabled by default for GCC but does not build with the latest
# Clang.
if (SSD_ROCKSDB_EXPERIMENTAL AND GCC)
if (SSD_ROCKSDB_EXPERIMENTAL AND NOT WIN32)
set(WITH_ROCKSDB_EXPERIMENTAL ON)
else()
set(WITH_ROCKSDB_EXPERIMENTAL OFF)
@ -200,6 +200,9 @@ else()
URL "https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz"
URL_HASH SHA256=bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_C_COMPILER:FILEPATH=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER:FILEPATH=${CMAKE_CXX_COMPILER}
-DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_CURRENT_BINARY_DIR}/toml11
-Dtoml11_BUILD_TEST:BOOL=OFF
BUILD_ALWAYS ON)

View File

@ -14,7 +14,7 @@ ExternalProject_add(Jemalloc_project
BUILD_BYPRODUCTS "${JEMALLOC_DIR}/include/jemalloc/jemalloc.h"
"${JEMALLOC_DIR}/lib/libjemalloc.a"
"${JEMALLOC_DIR}/lib/libjemalloc_pic.a"
CONFIGURE_COMMAND ./configure --prefix=${JEMALLOC_DIR} --enable-static --disable-cxx --enable-prof
CONFIGURE_COMMAND CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} ./configure --prefix=${JEMALLOC_DIR} --enable-static --disable-cxx --enable-prof
BUILD_IN_SOURCE ON
BUILD_COMMAND make
INSTALL_DIR "${JEMALLOC_DIR}"
@ -24,4 +24,4 @@ add_dependencies(im_jemalloc_pic Jemalloc_project)
set_target_properties(im_jemalloc_pic PROPERTIES IMPORTED_LOCATION "${JEMALLOC_DIR}/lib/libjemalloc_pic.a")
set_target_properties(im_jemalloc PROPERTIES IMPORTED_LOCATION "${JEMALLOC_DIR}/lib/libjemalloc.a")
target_include_directories(jemalloc INTERFACE "${JEMALLOC_DIR}/include")
target_link_libraries(jemalloc INTERFACE im_jemalloc_pic im_jemalloc)
target_link_libraries(jemalloc INTERFACE im_jemalloc_pic im_jemalloc)

View File

@ -139,6 +139,9 @@ class Config:
self.max_errors_args = {'short_name': 'E'}
self.old_binaries_path: Path = Path('/app/deploy/global_data/oldBinaries/')
self.old_binaries_path_args = {'help': 'Path to the directory containing the old fdb binaries'}
self.tls_plugin_path: Path = Path('/app/deploy/runtime/.tls_5_1/FDBLibTLS.so')
self.tls_plugin_path_args = {'help': 'Path to the tls plugin used for binaries < 5.2.0'}
self.disable_kaio: bool = False
self.use_valgrind: bool = False
self.use_valgrind_args = {'action': 'store_true'}
self.buggify = BuggifyOption('random')

View File

@ -18,7 +18,7 @@ from functools import total_ordering
from pathlib import Path
from test_harness.version import Version
from test_harness.config import config
from typing import List, Pattern, OrderedDict
from typing import Dict, List, Pattern, OrderedDict
from test_harness.summarize import Summary, SummaryTree
@ -309,6 +309,7 @@ class TestRun:
self.trace_format: str | None = config.trace_format
if Version.of_binary(self.binary) < "6.1.0":
self.trace_format = None
self.use_tls_plugin = Version.of_binary(self.binary) < "5.2.0"
self.temp_path = config.run_dir / str(self.uid)
# state for the run
self.retryable_error: bool = False
@ -332,6 +333,7 @@ class TestRun:
def run(self):
command: List[str] = []
env: Dict[str, str] = os.environ.copy()
valgrind_file: Path | None = None
if self.use_valgrind:
command.append('valgrind')
@ -346,6 +348,11 @@ class TestRun:
'-s', str(self.random_seed)]
if self.trace_format is not None:
command += ['--trace_format', self.trace_format]
if self.use_tls_plugin:
command += ['--tls_plugin', str(config.tls_plugin_path)]
env["FDB_TLS_PLUGIN"] = str(config.tls_plugin_path)
if config.disable_kaio:
command += ['--knob-disable-posix-kernel-aio=1']
if Version.of_binary(self.binary) >= '7.1.0':
command += ['-fi', 'on' if self.fault_injection_enabled else 'off']
if self.restarting:
@ -361,7 +368,7 @@ class TestRun:
resources = ResourceMonitor()
resources.start()
process = subprocess.Popen(command, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, cwd=self.temp_path,
text=True)
text=True, env=env)
did_kill = False
timeout = 20 * config.kill_seconds if self.use_valgrind else config.kill_seconds
err_out: str

View File

@ -32,10 +32,10 @@ public:
explicit SKRExampleImpl(KeyRangeRef kr): SpecialKeyRangeReadImpl(kr) {
// Our implementation is quite simple here, the key-value pairs are formatted as:
// \xff\xff/example/<country_name> : <capital_city_name>
CountryToCapitalCity[LiteralStringRef("USA")] = LiteralStringRef("Washington, D.C.");
CountryToCapitalCity[LiteralStringRef("UK")] = LiteralStringRef("London");
CountryToCapitalCity[LiteralStringRef("Japan")] = LiteralStringRef("Tokyo");
CountryToCapitalCity[LiteralStringRef("China")] = LiteralStringRef("Beijing");
CountryToCapitalCity["USA"_sr] = "Washington, D.C."_sr;
CountryToCapitalCity["UK"_sr] = "London"_sr;
CountryToCapitalCity["Japan"_sr] = "Tokyo"_sr;
CountryToCapitalCity["China"_sr] = "Beijing"_sr;
}
// Implement the getRange interface
Future<RangeResult> getRange(ReadYourWritesTransaction* ryw,
@ -58,7 +58,7 @@ private:
};
// Instantiate the function object
// In development, you should have a function object pointer in DatabaseContext(DatabaseContext.h) and initialize in DatabaseContext's constructor(NativeAPI.actor.cpp)
const KeyRangeRef exampleRange(LiteralStringRef("\xff\xff/example/"), LiteralStringRef("\xff\xff/example/\xff"));
const KeyRangeRef exampleRange("\xff\xff/example/"_sr, "\xff\xff/example/\xff"_sr);
SKRExampleImpl exampleImpl(exampleRange);
// Assuming the database handler is `cx`, register to special-key-space
// In development, you should register all function objects in the constructor of DatabaseContext(NativeAPI.actor.cpp)
@ -67,16 +67,16 @@ cx->specialKeySpace->registerKeyRange(exampleRange, &exampleImpl);
state ReadYourWritesTransaction tr(cx);
// get
Optional<Value> res1 = wait(tr.get("\xff\xff/example/Japan"));
ASSERT(res1.present() && res.getValue() == LiteralStringRef("Tokyo"));
ASSERT(res1.present() && res1.getValue() == "Tokyo"_sr);
// getRange
// Note: for getRange(key1, key2), both key1 and key2 should be prefixed with \xff\xff
// something like getRange("normal_key", "\xff\xff/...") is not supported yet
RangeResult res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff")));
RangeResult res2 = wait(tr.getRange("\xff\xff/example/U"_sr, "\xff\xff/example/U\xff"_sr));
// res2 should contain USA and UK
ASSERT(
res2.size() == 2 &&
res2[0].value == LiteralStringRef("London") &&
res2[1].value == LiteralStringRef("Washington, D.C.")
res2[0].value == "London"_sr &&
res2[1].value == "Washington, D.C."_sr
);
```

View File

@ -12,6 +12,7 @@ Administration
configuration
moving-a-cluster
tls
authorization
This document covers the administration of an existing FoundationDB cluster. We recommend you read this document before setting up a cluster for performance testing or production use.

View File

@ -0,0 +1,124 @@
#############
Authorization
#############
.. warning :: Authorization is currently experimental and is not recommended for use in production.
Introduction
============
A :ref:`multi-tenant <multi-tenancy>` database introduces a couple of concepts that did not previously exist in FoundationDB.
The first is privilege levels: *data-plane clients* have a typical workload limited to accessing a tenant keyspace,
whereas *control-plane clients*, or *administrators*, may read or update cluster-wide configuration through the system keyspace.
These operations also include the creation and deletion of tenants.
The second is access control: with multiple tenant keyspaces, it is natural to want to restrict a client's database access to a subset of them.
Privilege Levels
----------------
The authorization feature extends FoundationDB's existing TLS policy to distinguish administrators from data-plane clients,
making TLS configuration a prerequisite for enabling authorization.
There are only two privilege levels: *trusted* versus *untrusted* clients.
Trusted clients are authorized to perform any operation that pre-authorization FoundationDB clients used to perform, including those accessing the system keyspace.
Untrusted clients may only request what is necessary to access tenant keyspaces for which they are authorized.
Untrusted clients are blocked from accessing anything in the system keyspace or issuing management operations that modify the cluster in any way.
In order to be considered a trusted client, a client needs to be :ref:`configured with a valid chain of X.509 certificates and a private key <configuring-tls>`,
and its certificate chain must be trusted by the server. In other words, a client must successfully complete a mutual TLS authentication.
Additionally, if the server was configured with trusted IP subnets, i.e. run with one or more ``--trusted-subnet-SUBNET_NAME`` followed by a CIDR block describing the subnet,
then the client's IP as seen from the server must belong to at least one of the subnets.
Choosing to respond with an empty certificate chain during `client authentication <https://www.rfc-editor.org/rfc/rfc5246#section-7.4.6>`_ marks the client as untrusted.
If the server specifies a list of trusted subnets and the client's server-facing IP is not part of any of the subnets,
then the client is untrusted even if it successfully completes a mutual TLS authentication.
.. note:: Presenting a bad or untrusted certificate chain causes the server to break the client connection and eventually throttle the client.
It does not let the client connect as untrusted.
Access Control
--------------
To restrict an untrusted client's database access to a subset of tenant keyspaces, the authorization feature allows database administrators
to grant tenant-scoped access in the form of `JSON Web Tokens <https://www.rfc-editor.org/rfc/rfc7519>`_.
Token verification is performed against a set of named public keys written in `JWK Set <https://www.rfc-editor.org/rfc/rfc7517#section-5>`_ format.
A token's header part must contain the `key identifier <https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.4>`_ of the public key that will be used to verify the token itself.
Below is the list of token fields recognized by FoundationDB.
Note that some of the fields are *recognized* by FoundationDB but not *actively used* in enforcing security, pending future implementation.
Those fields are marked as **NOT required**.
.. table:: JSON Web Token Fields supported by FoundationDB
:align: left
:widths: auto
=============== =========== ======== ==================================================== ================================================================================
Containing Part Field Name Required Purpose Reference
=============== =========== ======== ==================================================== ================================================================================
Header ``typ`` Yes Type of JSON Web Signature. Must be ``JWT``. `RFC7519 Section 5.1 <https://www.rfc-editor.org/rfc/rfc7519#section-5.1>`_
Header ``alg`` Yes Algorithm used to generate the signature. Only `RFC7515 Section 4.1.1 <https://www.rfc-editor.org/rfc/rfc7515#section-4.1.1>`_
``ES256`` and ``RS256`` are supported.
Must match the ``alg`` attribute of public key.
Header ``kid`` Yes Name of public key with which to verify the token. `RFC7515 Section 4.1.4 <https://www.rfc-editor.org/rfc/rfc7515#section-4.1.4>`_
Must match the ``kid`` attribute of public key.
Claim ``exp`` Yes Timestamp after which token is not accepted. `RFC7519 Section 4.1.4 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.4>`_
Claim ``nbf`` Yes Timestamp before which token is not accepted. `RFC7519 Section 4.1.5 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.5>`_
Claim ``iat`` Yes Timestamp at which token was issued. `RFC7519 Section 4.1.6 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6>`_
Claim ``tenants`` Yes Tenant names for which the token holder is authorized. N/A
Must be an array.
Claim ``iss`` No Issuer of the token. `RFC7519 Section 4.1.1 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.1>`_
Claim ``sub`` No Subject of the token. `RFC7519 Section 4.1.2 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.2>`_
Claim ``aud`` No Intended recipients of the token. Must be an array. `RFC7519 Section 4.1.3 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.3>`_
Claim ``jti`` No String that uniquely identifies a token. `RFC7519 Section 4.1.7 <https://www.rfc-editor.org/rfc/rfc7519#section-4.1.7>`_
=============== =========== ======== ==================================================== ================================================================================
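As an illustration, the decoded parts of a token matching the requirements above might look like the sketch below.
The key name, timestamps, and tenant names are placeholders, and the exact representation of tenant names inside the
claim is an assumption of this example; the header and claims must ultimately be serialized and signed
(``ES256`` or ``RS256``) by your own token issuer to produce the JWT handed to clients.

.. code-block:: python

    import time

    # Placeholder values only -- this is not a working token.
    header = {
        "typ": "JWT",
        "alg": "ES256",             # must match the "alg" attribute of the verifying public key
        "kid": "my-signing-key-1",  # must match the "kid" attribute of the verifying public key
    }
    claims = {
        "iat": int(time.time()),          # issued now
        "nbf": int(time.time()) - 60,     # not accepted before one minute ago
        "exp": int(time.time()) + 3600,   # not accepted after one hour from now
        "tenants": ["tenant_one", "tenant_two"],  # tenant names the holder is authorized for
    }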
Public keys with which to verify the token must be serialized in `JWK Set <https://www.rfc-editor.org/rfc/rfc7517#section-5>`_ format and stored in a file.
The location of the key set file must be passed as command line argument ``--authorization-public-key-file`` to the ``fdbserver`` executable.
Public keys in the set must be either `RSA <https://datatracker.ietf.org/doc/html/rfc7518#section-6.3>`_ public keys
containing ``n`` and ``e`` parameters, each containing `Base64urlUInt <https://www.rfc-editor.org/rfc/rfc7518#section-2>`_-encoded modulus and exponent,
or `Elliptic Curve <https://datatracker.ietf.org/doc/html/rfc7518#section-6.2>`_ public keys on a ``P-256`` curve,
where ``crv`` parameter is set to ``P-256`` and ``x`` and ``y`` parameters contain
`base64url <https://datatracker.ietf.org/doc/html/rfc4648#section-5>`_-encoded affine coordinates.
In addition, each public key JSON object in set must contain ``kty`` (set to either ``EC`` or ``RSA``) field to indicate public key algorithm,
along with ``kid``, and ``alg`` fields to be compared against their token header counterparts.
Including private keys in the public key set is strongly discouraged; any private keys found in the set are excluded from consideration.
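For illustration, the following sketch writes a key set file containing a single ``P-256`` public key in the shape
described above; the ``kid`` and coordinate values are placeholders rather than usable key material.

.. code-block:: python

    import json

    # Placeholder key material -- generate and export your own verification keys.
    key_set = {
        "keys": [
            {
                "kty": "EC",
                "crv": "P-256",
                "kid": "my-signing-key-1",
                "alg": "ES256",
                "x": "<base64url-encoded affine x coordinate>",
                "y": "<base64url-encoded affine y coordinate>",
            }
        ]
    }

    with open("public_keys.json", "w") as key_file:
        json.dump(key_set, key_file)

The resulting file would then be passed to ``fdbserver`` via ``--authorization-public-key-file``.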
.. note:: By design, FoundationDB authorization feature does not support revocation of outstanding tokens.
Use extra caution in signing tokens with long token durations.
Enabling Clients to use Authorization Tokens
============================================
To use an authorization token, an untrusted client must be configured to trust the server's CA,
but must not be configured with the client's own certificates and keys.
More concretely, the client's ``TLS_CA_FILE`` must include the server's root CA certificate,
but the client must not be configured with its own ``TLS_CERTIFICATE_FILE`` or ``TLS_KEY_FILE``, whether programmatically or by environment variable.
Before performing a tenant data read or update, a client must set the ``AUTHORIZATION_TOKEN`` transaction option with the token string as its argument.
It is the client's responsibility to keep the token up to date by assigning a new token to the transaction object before the current one expires.
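A minimal client-side sketch in the Python binding is shown below. The option setter name follows the binding's
generated-option naming convention and the token string is a placeholder, so treat both as assumptions of this
example rather than confirmed API details.

.. code-block:: python

    import fdb

    fdb.api_version(720)
    db = fdb.open()  # client configured with TLS_CA_FILE only, no client certificate or key
    tenant = db.open_tenant(b'tenant_one')  # must appear in the token's "tenants" claim

    signed_token = "<signed JWT obtained from your token service>"  # placeholder

    tr = tenant.create_transaction()
    # Assumption: the AUTHORIZATION_TOKEN transaction option is exposed through the
    # binding's generated option setter. Re-assign a fresh token before the current one expires.
    tr.options.set_authorization_token(signed_token)
    tr[b'some_key'] = b'some_value'
    tr.commit().wait()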
.. note:: The TLS authentication mode of an untrusted client is similar to how typical web browsers connect to TLS-enabled web services.
They authenticate the server using their bundle of trusted root CA certificates,
but they do not authenticate themselves to the server.
Public Key Rotation
===================
FoundationDB's internal public key set automatically refreshes itself based on the key set file's latest content every ``PUBLIC_KEY_FILE_REFRESH_INTERVAL_SECONDS`` seconds.
The in-memory set of public keys does not update unless the key file holds a correct `JWK Set`_.
Token Caching
=============
In a single-threaded runtime environment such as FoundationDB, it is important not to overload the main thread with computationally expensive operations,
such as token signature verification. FoundationDB therefore internally caches tokens that are considered valid at the time of verification in a fixed-size cache,
whose size may be configured using the ``TOKEN_CACHE_SIZE`` knob.
.. note:: The token cache is independent of the active public key set. Once a token reaches the cache, it remains valid until its expiration time,
regardless of any key rotation that takes place thereafter.
Allowing Untrusted Clients to Access Tenant Data Without Tokens
===============================================================
Rolling out a public key distribution infrastructure and an authorization-enabled FoundationDB cluster in lockstep might not be feasible for large-scale distributed systems.
To support incremental rollout, the authorization feature introduces the ``ALLOW_TOKENLESS_TENANT_ACCESS`` boolean knob,
which preserves the TLS-based privilege-level policy without untrusted clients having to set authorization tokens on their transactions in order to access tenant data.
With this knob active, any authorization token assigned to the client transaction is simply ignored.

View File

@ -82,7 +82,7 @@ Values must always be encoded according to the :ref:`api-python-tuple-layer`.
// In GlobalConfig.actor.h
extern const KeyRef myGlobalConfigKey;
// In GlobalConfig.actor.cpp
const KeyRef myGlobalConfigKey = LiteralStringRef("config/key");
const KeyRef myGlobalConfigKey = "config/key"_sr;
// When you want to set the value..
Tuple value = Tuple::makeTuple((double)1.5);

View File

@ -2,6 +2,21 @@
Release Notes
#############
7.1.23
======
* Same as 7.1.22 release with AVX enabled.
7.1.22
======
* Released with AVX disabled.
* Added new latency samples for GetValue, GetRange, QueueWait, and VersionWait in storage servers. `(PR #8215) <https://github.com/apple/foundationdb/pull/8215>`_
* Fixed a rare partial data write for TLogs. `(PR #8210) <https://github.com/apple/foundationdb/pull/8210>`_
* Added HTTP proxy support for backup agents. `(PR #8193) <https://github.com/apple/foundationdb/pull/8193>`_
* Fixed a memory bug in secondary queries during index prefetch. `(PR #8195) <https://github.com/apple/foundationdb/pull/8195>`_, `(PR #8190) <https://github.com/apple/foundationdb/pull/8190>`_
* Introduced the STORAGE_SERVER_REBOOT_ON_IO_TIMEOUT knob to recreate the SS on io_timeout errors. `(PR #8123) <https://github.com/apple/foundationdb/pull/8123>`_
* Fixed two TLog stopped bugs and a CC leader replacement bug. `(PR #8081) <https://github.com/apple/foundationdb/pull/8081>`_
* Added back RecoveryAvailable trace event for status's seconds_since_last_recovered field. `(PR #8068) <https://github.com/apple/foundationdb/pull/8068>`_
7.1.21
======
* Same as 7.1.20 release with AVX enabled.

View File

@ -2,6 +2,8 @@
Tenants
#######
.. _multi-tenancy:
.. warning :: Tenants are currently experimental and are not recommended for use in production.
FoundationDB provides a feature called tenants that allows you to configure one or more named transaction domains in your cluster. A transaction domain is a key-space in which a transaction is allowed to operate, and no tenant operations are allowed to use keys outside the tenant key-space. Tenants can be useful for managing separate, unrelated use cases and preventing them from interfering with each other. They can also be helpful for defining safe boundaries when moving a subset of data between clusters.
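As a quick illustrative sketch (assuming the tenant management commands available in recent fdbcli builds; the tenant name ``mytenant`` and the key/value are placeholders), a tenant might be created and used like this::

    fdb> createtenant mytenant
    fdb> usetenant mytenant
    fdb> writemode on
    fdb> set hello world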

View File

@ -478,7 +478,7 @@ ACTOR Future<Void> fdbClient() {
state Transaction tx(db);
state std::string keyPrefix = "/tut/";
state Key startKey;
state KeyRef endKey = LiteralStringRef("/tut0");
state KeyRef endKey = "/tut0"_sr;
state int beginIdx = 0;
loop {
try {
@ -494,7 +494,7 @@ ACTOR Future<Void> fdbClient() {
RangeResult range = wait(tx.getRange(KeyRangeRef(startKey, endKey), 100));
for (int i = 0; i < 10; ++i) {
Key k = Key(keyPrefix + std::to_string(beginIdx + deterministicRandom()->randomInt(0, 100)));
tx.set(k, LiteralStringRef("foo"));
tx.set(k, "foo"_sr);
}
wait(tx.commit());
std::cout << "Committed\n";

View File

@ -905,12 +905,12 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
SO_END_OF_OPTIONS
};
const KeyRef exeAgent = LiteralStringRef("backup_agent");
const KeyRef exeBackup = LiteralStringRef("fdbbackup");
const KeyRef exeRestore = LiteralStringRef("fdbrestore");
const KeyRef exeFastRestoreTool = LiteralStringRef("fastrestore_tool"); // must be lower case
const KeyRef exeDatabaseAgent = LiteralStringRef("dr_agent");
const KeyRef exeDatabaseBackup = LiteralStringRef("fdbdr");
const KeyRef exeAgent = "backup_agent"_sr;
const KeyRef exeBackup = "fdbbackup"_sr;
const KeyRef exeRestore = "fdbrestore"_sr;
const KeyRef exeFastRestoreTool = "fastrestore_tool"_sr; // must be lower case
const KeyRef exeDatabaseAgent = "dr_agent"_sr;
const KeyRef exeDatabaseBackup = "fdbdr"_sr;
extern const char* getSourceVersion();
@ -1351,7 +1351,7 @@ ProgramExe getProgramType(std::string programExe) {
}
#endif
// For debugging convenience, remove .debug suffix if present.
if (StringRef(programExe).endsWith(LiteralStringRef(".debug")))
if (StringRef(programExe).endsWith(".debug"_sr))
programExe = programExe.substr(0, programExe.size() - 6);
// Check if backup agent
@ -2449,8 +2449,8 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
dbVersion,
LockDB::True,
randomUID,
LiteralStringRef(""),
LiteralStringRef("")));
""_sr,
""_sr));
// TODO: Support addPrefix and removePrefix
if (waitForDone) {
// Wait for parallel restore to finish and unlock DB after that
@ -3089,7 +3089,7 @@ static void addKeyRange(std::string optionValue, Standalone<VectorRef<KeyRangeRe
Version parseVersion(const char* str) {
StringRef s((const uint8_t*)str, strlen(str));
if (s.endsWith(LiteralStringRef("days")) || s.endsWith(LiteralStringRef("d"))) {
if (s.endsWith("days"_sr) || s.endsWith("d"_sr)) {
float days;
if (sscanf(str, "%f", &days) != 1) {
fprintf(stderr, "Could not parse version: %s\n", str);
@ -3608,7 +3608,7 @@ int main(int argc, char* argv[]) {
case OPT_DESTCONTAINER:
destinationContainer = args->OptionArg();
// If the url starts with '/' then prepend "file://" for backwards compatibility
if (StringRef(destinationContainer).startsWith(LiteralStringRef("/")))
if (StringRef(destinationContainer).startsWith("/"_sr))
destinationContainer = std::string("file://") + destinationContainer;
modifyOptions.destURL = destinationContainer;
break;
@ -3654,7 +3654,7 @@ int main(int argc, char* argv[]) {
case OPT_RESTORECONTAINER:
restoreContainer = args->OptionArg();
// If the url starts with '/' then prepend "file://" for backwards compatibility
if (StringRef(restoreContainer).startsWith(LiteralStringRef("/")))
if (StringRef(restoreContainer).startsWith("/"_sr))
restoreContainer = std::string("file://") + restoreContainer;
break;
case OPT_DESCRIBE_DEEP:
@ -4323,19 +4323,19 @@ int main(int argc, char* argv[]) {
char* demangled = abi::__cxa_demangle(i->first, NULL, NULL, NULL);
if (demangled) {
s = demangled;
if (StringRef(s).startsWith(LiteralStringRef("(anonymous namespace)::")))
s = s.substr(LiteralStringRef("(anonymous namespace)::").size());
if (StringRef(s).startsWith("(anonymous namespace)::"_sr))
s = s.substr("(anonymous namespace)::"_sr.size());
free(demangled);
} else
s = i->first;
#else
s = i->first;
if (StringRef(s).startsWith(LiteralStringRef("class `anonymous namespace'::")))
s = s.substr(LiteralStringRef("class `anonymous namespace'::").size());
else if (StringRef(s).startsWith(LiteralStringRef("class ")))
s = s.substr(LiteralStringRef("class ").size());
else if (StringRef(s).startsWith(LiteralStringRef("struct ")))
s = s.substr(LiteralStringRef("struct ").size());
if (StringRef(s).startsWith("class `anonymous namespace'::"_sr))
s = s.substr("class `anonymous namespace'::"_sr.size());
else if (StringRef(s).startsWith("class "_sr))
s = s.substr("class "_sr.size());
else if (StringRef(s).startsWith("struct "_sr))
s = s.substr("struct "_sr.size());
#endif
typeNames.emplace_back(s, i->first);

View File

@ -31,7 +31,7 @@
namespace fdb_cli {
const KeyRef advanceVersionSpecialKey = LiteralStringRef("\xff\xff/management/min_required_commit_version");
const KeyRef advanceVersionSpecialKey = "\xff\xff/management/min_required_commit_version"_sr;
ACTOR Future<bool> advanceVersionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 2) {

View File

@ -112,7 +112,7 @@ ACTOR Future<bool> blobRangeCommandActor(Database localDb,
end = tokens[3];
}
if (end > LiteralStringRef("\xff")) {
if (end > "\xff"_sr) {
// TODO is this something we want?
fmt::print("Cannot blobbify system keyspace! Problematic End Key: {0}\n", tokens[3].printable());
return false;

View File

@ -44,20 +44,20 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
if (tokens.size() < 2)
result = ConfigurationResult::NO_OPTIONS_PROVIDED;
else {
if (tokens[startToken] == LiteralStringRef("FORCE")) {
if (tokens[startToken] == "FORCE"_sr) {
force = true;
startToken = 2;
}
state Optional<ConfigureAutoResult> conf;
if (tokens[startToken] == LiteralStringRef("auto")) {
if (tokens[startToken] == "auto"_sr) {
// get cluster status
state Reference<ITransaction> tr = db->createTransaction();
if (!tr->isValid()) {
StatusObject _s = wait(StatusClient::statusFetcher(localDb));
s = _s;
} else {
state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
state ThreadFuture<Optional<Value>> statusValueF = tr->get("\xff\xff/status/json"_sr);
Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
if (!statusValue.present()) {
fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");
@ -166,7 +166,7 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
case ConfigurationResult::CONFLICTING_OPTIONS:
case ConfigurationResult::UNKNOWN_OPTION:
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
printUsage(LiteralStringRef("configure"));
printUsage("configure"_sr);
ret = false;
break;
case ConfigurationResult::INVALID_CONFIGURATION:
@ -259,7 +259,6 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
fprintf(stderr,
"Type `configure perpetual_storage_wiggle=1' to enable the perpetual wiggle, or `configure "
"storage_migration_type=gradual' to set the gradual migration type.\n");
ret = false;
break;
case ConfigurationResult::SUCCESS_WARN_ROCKSDB_EXPERIMENTAL:
printf("Configuration changed\n");

View File

@ -30,7 +30,7 @@
namespace fdb_cli {
const KeyRef consistencyCheckSpecialKey = LiteralStringRef("\xff\xff/management/consistency_check_suspended");
const KeyRef consistencyCheckSpecialKey = "\xff\xff/management/consistency_check_suspended"_sr;
ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
std::vector<StringRef> tokens,

View File

@ -0,0 +1,122 @@
/*
* ConsistencyScanCommand.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "fdbclient/ConsistencyScanInterface.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace fdb_cli {
ACTOR Future<bool> consistencyScanCommandActor(Database db, std::vector<StringRef> tokens) {
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(db);
// Here we do not proceed in a try-catch loop since the transaction is always supposed to succeed.
// If not, the outer loop catch block(fdbcli.actor.cpp) will handle the error and print out the error message
state int usageError = 0;
state ConsistencyScanInfo csInfo = ConsistencyScanInfo();
tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
// Get the existing consistencyScanInfo object if present
state Optional<Value> consistencyScanInfo = wait(ConsistencyScanInfo::getInfo(tr));
wait(tr->commit());
if (consistencyScanInfo.present())
csInfo = ObjectReader::fromStringRef<ConsistencyScanInfo>(consistencyScanInfo.get(), IncludeVersion());
tr->reset();
if (tokens.size() == 1) {
printf("Consistency Scan Info: %s\n", csInfo.toString().c_str());
} else if ((tokens.size() == 2) && tokencmp(tokens[1], "off")) {
csInfo.consistency_scan_enabled = false;
wait(ConsistencyScanInfo::setInfo(tr, csInfo));
wait(tr->commit());
} else if ((tokencmp(tokens[1], "on") && tokens.size() > 2)) {
csInfo.consistency_scan_enabled = true;
state std::vector<StringRef>::iterator t;
for (t = tokens.begin() + 2; t != tokens.end(); ++t) {
if (tokencmp(t->toString(), "restart")) {
if (++t != tokens.end()) {
if (tokencmp(t->toString(), "0")) {
csInfo.restart = false;
} else if (tokencmp(t->toString(), "1")) {
csInfo.restart = true;
} else {
usageError = 1;
}
} else {
usageError = 1;
}
} else if (tokencmp(t->toString(), "maxRate")) {
if (++t != tokens.end()) {
char* end;
csInfo.max_rate = std::strtod(t->toString().data(), &end);
if (!std::isspace(*end) && (*end != '\0')) {
fprintf(stderr, "ERROR: %s failed to parse.\n", t->toString().c_str());
return false;
}
} else {
usageError = 1;
}
} else if (tokencmp(t->toString(), "targetInterval")) {
if (++t != tokens.end()) {
char* end;
csInfo.target_interval = std::strtod(t->toString().data(), &end);
if (!std::isspace(*end) && (*end != '\0')) {
fprintf(stderr, "ERROR: %s failed to parse.\n", t->toString().c_str());
return false;
}
} else {
usageError = 1;
}
} else {
usageError = 1;
}
}
if (!usageError) {
wait(ConsistencyScanInfo::setInfo(tr, csInfo));
wait(tr->commit());
}
} else {
usageError = 1;
}
if (usageError) {
printUsage(tokens[0]);
return false;
}
return true;
}
CommandFactory consistencyScanFactory(
"consistencyscan",
CommandHelp("consistencyscan <on|off> <restart 0|1> <maxRate val> <targetInterval val>",
"enables or disables consistency scan",
"Calling this command with `on' enables the consistency scan process to run the scan with given "
"arguments and `off' will halt the scan. "
"Calling this command with no arguments will display if consistency scan is currently enabled.\n"));
} // namespace fdb_cli
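// Usage sketch for the new command (illustrative values only; arguments as described
// in the CommandHelp above, entered at the fdbcli prompt):
//   fdb> consistencyscan on restart 1 maxRate 50000000 targetInterval 604800
//   fdb> consistencyscan off
//   fdb> consistencyscan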

View File

@ -64,17 +64,26 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
state int notEnoughMachineResults = 0;
state StringRef new_cluster_description;
state std::string auto_coordinators_str;
StringRef nameTokenBegin = LiteralStringRef("description=");
state bool disableConfigDB = false;
StringRef nameTokenBegin = "description="_sr;
StringRef noConfigDB = "--no-config-db"_sr;
for (auto tok = tokens.begin() + 1; tok != tokens.end(); ++tok) {
if (tok->startsWith(nameTokenBegin)) {
if (tok->startsWith(nameTokenBegin) && new_cluster_description.empty()) {
new_cluster_description = tok->substr(nameTokenBegin.size());
auto next = tok - 1;
std::copy(tok + 1, tokens.end(), tok);
tokens.resize(tokens.size() - 1);
break;
tok = next;
} else if (tok->startsWith(noConfigDB)) {
disableConfigDB = true;
auto next = tok - 1;
std::copy(tok + 1, tokens.end(), tok);
tokens.resize(tokens.size() - 1);
tok = next;
}
}
state bool automatic = tokens.size() == 2 && tokens[1] == LiteralStringRef("auto");
state bool automatic = tokens.size() == 2 && tokens[1] == "auto"_sr;
state Reference<ITransaction> tr = db->createTransaction();
loop {
tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
@ -83,6 +92,10 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
if (new_cluster_description.size()) {
tr->set(fdb_cli::clusterDescriptionSpecialKey, new_cluster_description);
}
if (disableConfigDB) {
// All that matters is the key is set.
tr->set(fdb_cli::configDBSpecialKey, ""_sr);
}
// if auto change, read the special key to retrieve the recommended config
if (automatic) {
// if the previous read failed, retry; otherwise, use the same recommended config
@ -173,9 +186,10 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
namespace fdb_cli {
const KeyRef clusterDescriptionSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/cluster_description");
const KeyRef coordinatorsAutoSpecialKey = LiteralStringRef("\xff\xff/management/auto_coordinators");
const KeyRef coordinatorsProcessSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/processes");
const KeyRef clusterDescriptionSpecialKey = "\xff\xff/configuration/coordinators/cluster_description"_sr;
const KeyRef configDBSpecialKey = "\xff\xff/configuration/coordinators/config_db"_sr;
const KeyRef coordinatorsAutoSpecialKey = "\xff\xff/management/auto_coordinators"_sr;
const KeyRef coordinatorsProcessSpecialKey = "\xff\xff/configuration/coordinators/processes"_sr;
ACTOR Future<bool> coordinatorsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 2) {

View File

@ -108,8 +108,8 @@ Future<Void> setDDIgnoreRebalanceOff(Reference<IDatabase> db, uint8_t DDIgnoreOp
namespace fdb_cli {
const KeyRef ddModeSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/mode");
const KeyRef ddIgnoreRebalanceSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/rebalance_ignored");
const KeyRef ddModeSpecialKey = "\xff\xff/management/data_distribution/mode"_sr;
const KeyRef ddIgnoreRebalanceSpecialKey = "\xff\xff/management/data_distribution/rebalance_ignored"_sr;
constexpr auto usage =
"Usage: datadistribution <on|off|disable <ssfailure|rebalance|rebalance_disk|rebalance_read>|enable "
"<ssfailure|rebalance|rebalance_disk|rebalance_read>>\n";
@ -127,7 +127,7 @@ ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::ve
printf("Data distribution is turned off.\n");
} else if (tokencmp(tokens[1], "disable")) {
if (tokencmp(tokens[2], "ssfailure")) {
wait(success((setHealthyZone(db, LiteralStringRef("IgnoreSSFailures"), 0))));
wait(success((setHealthyZone(db, "IgnoreSSFailures"_sr, 0))));
printf("Data distribution is disabled for storage server failures.\n");
} else if (tokencmp(tokens[2], "rebalance")) {
wait(setDDIgnoreRebalanceOn(db, DDIgnore::REBALANCE_DISK | DDIgnore::REBALANCE_READ));

View File

@ -227,22 +227,19 @@ ACTOR Future<Void> checkForCoordinators(Reference<IDatabase> db, std::vector<Add
namespace fdb_cli {
const KeyRangeRef excludedServersSpecialKeyRange(LiteralStringRef("\xff\xff/management/excluded/"),
LiteralStringRef("\xff\xff/management/excluded0"));
const KeyRangeRef failedServersSpecialKeyRange(LiteralStringRef("\xff\xff/management/failed/"),
LiteralStringRef("\xff\xff/management/failed0"));
const KeyRangeRef excludedLocalitySpecialKeyRange(LiteralStringRef("\xff\xff/management/excluded_locality/"),
LiteralStringRef("\xff\xff/management/excluded_locality0"));
const KeyRangeRef failedLocalitySpecialKeyRange(LiteralStringRef("\xff\xff/management/failed_locality/"),
LiteralStringRef("\xff\xff/management/failed_locality0"));
const KeyRef excludedForceOptionSpecialKey = LiteralStringRef("\xff\xff/management/options/excluded/force");
const KeyRef failedForceOptionSpecialKey = LiteralStringRef("\xff\xff/management/options/failed/force");
const KeyRef excludedLocalityForceOptionSpecialKey =
LiteralStringRef("\xff\xff/management/options/excluded_locality/force");
const KeyRef failedLocalityForceOptionSpecialKey =
LiteralStringRef("\xff\xff/management/options/failed_locality/force");
const KeyRangeRef exclusionInProgressSpecialKeyRange(LiteralStringRef("\xff\xff/management/in_progress_exclusion/"),
LiteralStringRef("\xff\xff/management/in_progress_exclusion0"));
const KeyRangeRef excludedServersSpecialKeyRange("\xff\xff/management/excluded/"_sr,
"\xff\xff/management/excluded0"_sr);
const KeyRangeRef failedServersSpecialKeyRange("\xff\xff/management/failed/"_sr, "\xff\xff/management/failed0"_sr);
const KeyRangeRef excludedLocalitySpecialKeyRange("\xff\xff/management/excluded_locality/"_sr,
"\xff\xff/management/excluded_locality0"_sr);
const KeyRangeRef failedLocalitySpecialKeyRange("\xff\xff/management/failed_locality/"_sr,
"\xff\xff/management/failed_locality0"_sr);
const KeyRef excludedForceOptionSpecialKey = "\xff\xff/management/options/excluded/force"_sr;
const KeyRef failedForceOptionSpecialKey = "\xff\xff/management/options/failed/force"_sr;
const KeyRef excludedLocalityForceOptionSpecialKey = "\xff\xff/management/options/excluded_locality/force"_sr;
const KeyRef failedLocalityForceOptionSpecialKey = "\xff\xff/management/options/failed_locality/force"_sr;
const KeyRangeRef exclusionInProgressSpecialKeyRange("\xff\xff/management/in_progress_exclusion/"_sr,
"\xff\xff/management/in_progress_exclusion0"_sr);
ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens, Future<Void> warn) {
if (tokens.size() <= 1) {
@ -281,11 +278,11 @@ ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<Stri
if (!result)
return false;
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
if (*t == LiteralStringRef("FORCE")) {
if (*t == "FORCE"_sr) {
force = true;
} else if (*t == LiteralStringRef("no_wait")) {
} else if (*t == "no_wait"_sr) {
waitForAllExcluded = false;
} else if (*t == LiteralStringRef("failed")) {
} else if (*t == "failed"_sr) {
markFailed = true;
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) &&
t->toString().find(':') != std::string::npos) {

View File

@ -78,7 +78,7 @@ ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
name + "=" +
json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
} else {
printUsage(LiteralStringRef("fileconfigure"));
printUsage("fileconfigure"_sr);
return false;
}
}

View File

@ -92,8 +92,7 @@ ACTOR Future<Void> includeServers(Reference<IDatabase> db, std::vector<AddressEx
// This is why we now make two clears: first only of the ip
// address, the second will delete all ports.
if (s.isWholeMachine())
tr->clear(KeyRangeRef(addr.withSuffix(LiteralStringRef(":")),
addr.withSuffix(LiteralStringRef(";"))));
tr->clear(KeyRangeRef(addr.withSuffix(":"_sr), addr.withSuffix(";"_sr)));
}
}
wait(safeThreadFutureToFuture(tr->commit()));
@ -112,9 +111,9 @@ ACTOR Future<bool> include(Reference<IDatabase> db, std::vector<StringRef> token
state bool failed = false;
state bool all = false;
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
if (*t == LiteralStringRef("all")) {
if (*t == "all"_sr) {
all = true;
} else if (*t == LiteralStringRef("failed")) {
} else if (*t == "failed"_sr) {
failed = true;
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) && t->toString().find(':') != std::string::npos) {
// if the token starts with 'locality_' prefix.

View File

@ -59,7 +59,7 @@ ACTOR Future<bool> lockDatabase(Reference<IDatabase> db, UID id) {
namespace fdb_cli {
const KeyRef lockSpecialKey = LiteralStringRef("\xff\xff/management/db_locked");
const KeyRef lockSpecialKey = "\xff\xff/management/db_locked"_sr;
ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 1) {

View File

@ -69,10 +69,10 @@ ACTOR Future<Void> printHealthyZone(Reference<IDatabase> db) {
namespace fdb_cli {
const KeyRangeRef maintenanceSpecialKeyRange = KeyRangeRef(LiteralStringRef("\xff\xff/management/maintenance/"),
LiteralStringRef("\xff\xff/management/maintenance0"));
const KeyRangeRef maintenanceSpecialKeyRange =
KeyRangeRef("\xff\xff/management/maintenance/"_sr, "\xff\xff/management/maintenance0"_sr);
// The special key, if present, means data distribution is disabled for storage failures;
const KeyRef ignoreSSFailureSpecialKey = LiteralStringRef("\xff\xff/management/maintenance/IgnoreSSFailures");
const KeyRef ignoreSSFailureSpecialKey = "\xff\xff/management/maintenance/IgnoreSSFailures"_sr;
// add a zone to maintenance and specify the maintenance duration
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning) {

View File

@ -115,17 +115,13 @@ ACTOR Future<bool> profileCommandActor(Database db,
return false;
}
// Hold the reference to the standalone's memory
state ThreadFuture<RangeResult> kvsFuture =
tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY);
state ThreadFuture<RangeResult> kvsFuture = tr->getRange(
KeyRangeRef("\xff\xff/worker_interfaces/"_sr, "\xff\xff/worker_interfaces0"_sr), CLIENT_KNOBS->TOO_MANY);
RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
ASSERT(!kvs.more);
for (const auto& pair : kvs) {
auto ip_port =
(pair.key.endsWith(LiteralStringRef(":tls")) ? pair.key.removeSuffix(LiteralStringRef(":tls"))
: pair.key)
.removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
auto ip_port = (pair.key.endsWith(":tls"_sr) ? pair.key.removeSuffix(":tls"_sr) : pair.key)
.removePrefix("\xff\xff/worker_interfaces/"_sr);
printf("%s\n", printable(ip_port).c_str());
}
} else {

View File

@ -105,12 +105,10 @@ ACTOR Future<bool> setProcessClass(Reference<IDatabase> db, KeyRef network_addre
namespace fdb_cli {
const KeyRangeRef processClassSourceSpecialKeyRange =
KeyRangeRef(LiteralStringRef("\xff\xff/configuration/process/class_source/"),
LiteralStringRef("\xff\xff/configuration/process/class_source0"));
KeyRangeRef("\xff\xff/configuration/process/class_source/"_sr, "\xff\xff/configuration/process/class_source0"_sr);
const KeyRangeRef processClassTypeSpecialKeyRange =
KeyRangeRef(LiteralStringRef("\xff\xff/configuration/process/class_type/"),
LiteralStringRef("\xff\xff/configuration/process/class_type0"));
KeyRangeRef("\xff\xff/configuration/process/class_type/"_sr, "\xff\xff/configuration/process/class_type0"_sr);
ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 3 && tokens.size() != 1) {

View File

@ -40,7 +40,7 @@ ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<Str
for (int i = 1; i < tokens.size(); i++) {
snap_cmd = snap_cmd.withSuffix(tokens[i]);
if (i != tokens.size() - 1) {
snap_cmd = snap_cmd.withSuffix(LiteralStringRef(" "));
snap_cmd = snap_cmd.withSuffix(" "_sr);
}
}
try {

View File

@ -1256,7 +1256,7 @@ ACTOR Future<bool> statusCommandActor(Reference<IDatabase> db,
StatusObject _s = wait(StatusClient::statusFetcher(localDb));
s = _s;
} else {
state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
state ThreadFuture<Optional<Value>> statusValueF = tr->get("\xff\xff/status/json"_sr);
Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
if (!statusValue.present()) {
fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");

View File

@ -163,11 +163,11 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
}
}
if (tokens.size() == 7) {
if (tokens[6] == LiteralStringRef("default")) {
if (tokens[6] == "default"_sr) {
priority = TransactionPriority::DEFAULT;
} else if (tokens[6] == LiteralStringRef("immediate")) {
} else if (tokens[6] == "immediate"_sr) {
priority = TransactionPriority::IMMEDIATE;
} else if (tokens[6] == LiteralStringRef("batch")) {
} else if (tokens[6] == "batch"_sr) {
priority = TransactionPriority::BATCH;
} else {
fprintf(stderr,

View File

@ -89,7 +89,7 @@ ACTOR Future<bool> tssQuarantine(Reference<IDatabase> db, bool enable, UID tssId
}
if (enable) {
tr->set(tssQuarantineKeyFor(tssId), LiteralStringRef(""));
tr->set(tssQuarantineKeyFor(tssId), ""_sr);
// remove server from TSS mapping when quarantine is enabled
tssMapDB.erase(tr, ssi.tssPairID.get());
} else {
@ -112,19 +112,19 @@ namespace fdb_cli {
ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() == 2) {
if (tokens[1] != LiteralStringRef("list")) {
if (tokens[1] != "list"_sr) {
printUsage(tokens[0]);
return false;
} else {
wait(tssQuarantineList(db));
}
} else if (tokens.size() == 3) {
if ((tokens[1] != LiteralStringRef("start") && tokens[1] != LiteralStringRef("stop")) ||
(tokens[2].size() != 32) || !std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
if ((tokens[1] != "start"_sr && tokens[1] != "stop"_sr) || (tokens[2].size() != 32) ||
!std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
printUsage(tokens[0]);
return false;
} else {
bool enable = tokens[1] == LiteralStringRef("start");
bool enable = tokens[1] == "start"_sr;
UID tssId = UID::fromString(tokens[2].toString());
bool success = wait(tssQuarantine(db, enable, tssId));
return success;

Some files were not shown because too many files have changed in this diff