Merge branch 'main' of https://github.com/apple/foundationdb into thread-priority

He Liu 2022-09-30 08:57:53 -07:00
commit 5f975623fb
412 changed files with 10837 additions and 5796 deletions

.gitignore

@ -64,6 +64,7 @@ packaging/msi/obj
simfdb
tests/oldBinaries
trace.*.xml
trace.*.json
.venv
# Editor files


@ -34,6 +34,7 @@
#include <algorithm>
#include <exception>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>


@ -28,6 +28,7 @@
#include <algorithm>
#include <exception>
#include <cstring>
#include <stdexcept>
static int hexValue(char c) {
static char const digits[] = "0123456789ABCDEF";


@ -293,6 +293,22 @@ if(NOT WIN32)
@LOG_DIR@
)
add_fdbclient_test(
NAME fdb_c_api_tests_local_only
DISABLE_LOG_DUMP
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/local_tests
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
)
add_fdbclient_test(
NAME fdb_c_api_tests_blob_granule
DISABLE_LOG_DUMP


@ -548,10 +548,14 @@ extern "C" DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_verify_blob_rang
uint8_t const* end_key_name,
int end_key_name_length,
int64_t version) {
Optional<Version> rv;
if (version != latestVersion) {
rv = version;
}
return (FDBFuture*)(DB(db)
->verifyBlobRange(KeyRangeRef(StringRef(begin_key_name, begin_key_name_length),
StringRef(end_key_name, end_key_name_length)),
version)
rv)
.extractPtr());
}
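The wrapper above maps the C API's latestVersion sentinel onto an empty Optional before crossing into the client layer. A minimal sketch of the same conversion in isolation, assuming the -2 sentinel the tester passes elsewhere in this diff (the real constant lives in fdbclient):

#include <cstdint>
#include <optional>

using Version = int64_t;
constexpr Version kLatestVersion = -2; // hypothetical stand-in for latestVersion

// An empty optional means "verify at the latest version"; any other
// value pins the verification to the caller-supplied version.
std::optional<Version> toOptionalVersion(Version v) {
    if (v == kLatestVersion)
        return std::nullopt;
    return v;
}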


@ -84,12 +84,12 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_network_set_option(FDBNetworkOption
int value_length);
#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network(void);
#endif
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network(void);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network(void);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*),
void* hook_parameter);
@ -548,8 +548,8 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_summarize_blob_granules(
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version);
DLLEXPORT int fdb_get_max_api_version();
DLLEXPORT const char* fdb_get_client_version();
DLLEXPORT int fdb_get_max_api_version(void);
DLLEXPORT const char* fdb_get_client_version(void);
/* LEGACY API VERSIONS */
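These prototypes cover the client lifecycle; the (void) spelling matters to C callers, where an empty parameter list leaves arguments unchecked. A minimal sketch of the lifecycle the prototypes imply, assuming API version 720 and the fdb_c headers on the include path:

#define FDB_API_VERSION 720
#include <foundationdb/fdb_c.h>

#include <thread>

int main() {
    // Must be the first API call; the macro checks header/runtime agreement.
    if (fdb_select_api_version(FDB_API_VERSION))
        return 1;
    if (fdb_setup_network())
        return 1;
    // fdb_run_network() blocks until the network is stopped, so it gets
    // a dedicated thread; other fdb_c calls may come from any thread.
    std::thread network([] { fdb_run_network(); });
    // ... open databases, run transactions ...
    fdb_stop_network(); // signals fdb_run_network() to return
    network.join();
    return 0;
}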


@ -36,7 +36,17 @@ public:
private:
// FIXME: use other new blob granule apis!
enum OpType { OP_INSERT, OP_CLEAR, OP_CLEAR_RANGE, OP_READ, OP_GET_RANGES, OP_SUMMARIZE, OP_LAST = OP_SUMMARIZE };
enum OpType {
OP_INSERT,
OP_CLEAR,
OP_CLEAR_RANGE,
OP_READ,
OP_GET_GRANULES,
OP_SUMMARIZE,
OP_GET_BLOB_RANGES,
OP_VERIFY,
OP_LAST = OP_VERIFY
};
std::vector<OpType> excludedOpTypes;
// Allow reads at the start to get blob_granule_transaction_too_old if BG data isn't initialized yet
@ -120,7 +130,7 @@ private:
getTenant(tenantId));
}
void randomGetRangesOp(TTaskFct cont, std::optional<int> tenantId) {
void randomGetGranulesOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -140,42 +150,19 @@ private:
true);
},
[this, begin, end, results, cont]() {
if (seenReadSuccess) {
ASSERT(results->size() > 0);
ASSERT(results->front().beginKey <= begin);
ASSERT(results->back().endKey >= end);
}
for (int i = 0; i < results->size(); i++) {
// no empty or inverted ranges
if ((*results)[i].beginKey >= (*results)[i].endKey) {
error(fmt::format("Empty/inverted range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey < (*results)[i].endKey);
}
for (int i = 1; i < results->size(); i++) {
// ranges contain entire requested key range
if ((*results)[i].beginKey != (*results)[i].endKey) {
error(fmt::format("Non-contiguous range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey == (*results)[i - 1].endKey);
}
this->validateRanges(results, begin, end, seenReadSuccess);
schedule(cont);
},
getTenant(tenantId));
}
void randomSummarizeOp(TTaskFct cont, std::optional<int> tenantId) {
if (!seenReadSuccess) {
// tester can't handle this throwing bg_txn_too_old, so just don't call it unless we have already seen a
// read success
schedule(cont);
return;
}
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -194,11 +181,9 @@ private:
true);
},
[this, begin, end, results, cont]() {
if (seenReadSuccess) {
ASSERT(results->size() > 0);
ASSERT(results->front().keyRange.beginKey <= begin);
ASSERT(results->back().keyRange.endKey >= end);
}
ASSERT(results->size() > 0);
ASSERT(results->front().keyRange.beginKey <= begin);
ASSERT(results->back().keyRange.endKey >= end);
for (int i = 0; i < results->size(); i++) {
// TODO: could do validation of subsequent calls and ensure snapshot version never decreases
@ -218,6 +203,95 @@ private:
getTenant(tenantId));
}
void validateRanges(std::shared_ptr<std::vector<fdb::KeyRange>> results,
fdb::Key begin,
fdb::Key end,
bool shouldBeRanges) {
if (shouldBeRanges) {
ASSERT(results->size() > 0);
ASSERT(results->front().beginKey <= begin);
ASSERT(results->back().endKey >= end);
}
for (int i = 0; i < results->size(); i++) {
// no empty or inverted ranges
if ((*results)[i].beginKey >= (*results)[i].endKey) {
error(fmt::format("Empty/inverted range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey < (*results)[i].endKey);
}
for (int i = 1; i < results->size(); i++) {
// ranges contain entire requested key range
if ((*results)[i].beginKey != (*results)[i].endKey) {
error(fmt::format("Non-contiguous range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey == (*results)[i - 1].endKey);
}
}
void randomGetBlobRangesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
auto results = std::make_shared<std::vector<fdb::KeyRange>>();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end, results](auto ctx) {
fdb::Future f = ctx->db().listBlobbifiedRanges(begin, end, 1000).eraseType();
ctx->continueAfter(f, [ctx, f, results]() {
*results = copyKeyRangeArray(f.get<fdb::future_var::KeyRangeRefArray>());
ctx->done();
});
},
[this, begin, end, results, cont]() {
this->validateRanges(results, begin, end, seenReadSuccess);
schedule(cont);
},
/* failOnError = */ false);
}
void randomVerifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
auto verifyVersion = std::make_shared<int64_t>(-1);
// info("Verify op starting");
execOperation(
[begin, end, verifyVersion](auto ctx) {
fdb::Future f = ctx->db().verifyBlobRange(begin, end, -2 /* latest version*/).eraseType();
ctx->continueAfter(f, [ctx, verifyVersion, f]() {
*verifyVersion = f.get<fdb::future_var::Int64>();
ctx->done();
});
},
[this, begin, end, verifyVersion, cont]() {
if (*verifyVersion == -1) {
ASSERT(!seenReadSuccess);
} else {
if (!seenReadSuccess) {
info("BlobGranuleCorrectness::randomVerifyOp first success");
}
seenReadSuccess = true;
}
// info(fmt::format("verify op done @ {}", *verifyVersion));
schedule(cont);
},
/* failOnError = */ false);
}
void randomOperation(TTaskFct cont) {
std::optional<int> tenantId = randomTenant();
@ -239,12 +313,18 @@ private:
case OP_READ:
randomReadOp(cont, tenantId);
break;
case OP_GET_RANGES:
randomGetRangesOp(cont, tenantId);
case OP_GET_GRANULES:
randomGetGranulesOp(cont, tenantId);
break;
case OP_SUMMARIZE:
randomSummarizeOp(cont, tenantId);
break;
case OP_GET_BLOB_RANGES:
randomGetBlobRangesOp(cont);
break;
case OP_VERIFY:
randomVerifyOp(cont);
break;
}
}
};
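The contiguity checks in validateRanges reduce to three invariants: no empty or inverted ranges, each range begins where the previous one ended, and (when results are expected) the first and last ranges bracket the queried interval. A hedged standalone version of the same predicate:

#include <cassert>
#include <string>
#include <vector>

struct KeyRange {
    std::string beginKey, endKey;
};

// Mirrors validateRanges above: non-empty ranges, no gaps or overlaps,
// and full coverage of [begin, end) when coverage is required.
bool rangesCover(const std::vector<KeyRange>& rs, const std::string& begin, const std::string& end) {
    if (rs.empty() || rs.front().beginKey > begin || rs.back().endKey < end)
        return false;
    for (size_t i = 0; i < rs.size(); i++) {
        if (rs[i].beginKey >= rs[i].endKey)
            return false; // empty or inverted
        if (i > 0 && rs[i].beginKey != rs[i - 1].endKey)
            return false; // gap or overlap
    }
    return true;
}

int main() {
    assert(rangesCover({ { "a", "m" }, { "m", "z" } }, "b", "y"));
    assert(!rangesCover({ { "a", "m" }, { "n", "z" } }, "b", "y")); // gap at [m, n)
    return 0;
}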


@ -34,10 +34,21 @@ private:
OP_READ_NO_MATERIALIZE,
OP_READ_FILE_LOAD_ERROR,
OP_READ_TOO_OLD,
OP_CANCEL_RANGES,
OP_LAST = OP_CANCEL_RANGES
OP_PURGE_UNALIGNED,
OP_BLOBBIFY_UNALIGNED,
OP_UNBLOBBIFY_UNALIGNED,
OP_CANCEL_GET_GRANULES,
OP_CANCEL_GET_RANGES,
OP_CANCEL_VERIFY,
OP_CANCEL_SUMMARIZE,
OP_CANCEL_BLOBBIFY,
OP_CANCEL_UNBLOBBIFY,
OP_CANCEL_PURGE,
OP_LAST = OP_CANCEL_PURGE
};
// could add summarize too old and verify too old as ops if desired but those are lower value
// Allow reads at the start to get blob_granule_transaction_too_old if BG data isn't initialized yet
// FIXME: should still guarantee a read succeeds eventually somehow
bool seenReadSuccess = false;
@ -74,9 +85,6 @@ private:
error(fmt::format("Operation succeeded in error test!"));
}
ASSERT(err.code() != error_code_success);
if (err.code() != error_code_blob_granule_transaction_too_old) {
seenReadSuccess = true;
}
if (err.code() != expectedError) {
info(fmt::format("incorrect error. Expected {}, Got {}", expectedError, err.code()));
if (err.code() == error_code_blob_granule_transaction_too_old) {
@ -86,6 +94,9 @@ private:
ctx->onError(err);
}
} else {
if (err.code() != error_code_blob_granule_transaction_too_old) {
seenReadSuccess = true;
}
ctx->done();
}
},
@ -107,7 +118,55 @@ private:
doErrorOp(cont, "", true, 1, error_code_blob_granule_transaction_too_old);
}
void randomCancelGetRangesOp(TTaskFct cont) {
void randomPurgeUnalignedOp(TTaskFct cont) {
// purge ranges need to be aligned to blob range boundaries, so this should always fail
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[this, begin, end](auto ctx) {
fdb::Future f = ctx->db().purgeBlobGranules(begin, end, -2, false).eraseType();
ctx->continueAfter(
f,
[this, ctx, f]() {
info(fmt::format("unaligned purge got {}", f.error().code()));
ASSERT(f.error().code() == error_code_unsupported_operation);
ctx->done();
},
true);
},
[this, cont]() { schedule(cont); });
}
void randomBlobbifyUnalignedOp(bool blobbify, TTaskFct cont) {
// blobbify/unblobbify need to be aligned to blob range boundaries, so this should always return false
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
auto success = std::make_shared<bool>(false);
execOperation(
[begin, end, blobbify, success](auto ctx) {
fdb::Future f = blobbify ? ctx->db().blobbifyRange(begin, end).eraseType()
: ctx->db().unblobbifyRange(begin, end).eraseType();
ctx->continueAfter(
f,
[ctx, f, success]() {
*success = f.get<fdb::future_var::Bool>();
ctx->done();
},
true);
},
[this, cont, success]() {
ASSERT(!(*success));
schedule(cont);
});
}
void randomCancelGetGranulesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -121,6 +180,90 @@ private:
[this, cont]() { schedule(cont); });
}
void randomCancelGetRangesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().listBlobbifiedRanges(begin, end, 1000).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelVerifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().verifyBlobRange(begin, end, -2 /* latest version*/).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelSummarizeOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execTransaction(
[begin, end](auto ctx) {
fdb::Future f = ctx->tx().summarizeBlobGranules(begin, end, -2, 1000).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelBlobbifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().blobbifyRange(begin, end).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelUnblobbifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().unblobbifyRange(begin, end).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelPurgeOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().purgeBlobGranules(begin, end, -2, false).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomOperation(TTaskFct cont) override {
OpType txType = (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
@ -133,9 +276,37 @@ private:
case OP_READ_TOO_OLD:
randomOpReadTooOld(cont);
break;
case OP_CANCEL_RANGES:
case OP_PURGE_UNALIGNED:
// gets the correct error but it doesn't propagate properly in the test
// randomPurgeUnalignedOp(cont);
break;
case OP_BLOBBIFY_UNALIGNED:
randomBlobbifyUnalignedOp(true, cont);
break;
case OP_UNBLOBBIFY_UNALIGNED:
randomBlobbifyUnalignedOp(false, cont);
break;
case OP_CANCEL_GET_GRANULES:
randomCancelGetGranulesOp(cont);
break;
case OP_CANCEL_GET_RANGES:
randomCancelGetRangesOp(cont);
break;
case OP_CANCEL_VERIFY:
randomCancelVerifyOp(cont);
break;
case OP_CANCEL_SUMMARIZE:
randomCancelSummarizeOp(cont);
break;
case OP_CANCEL_BLOBBIFY:
randomCancelBlobbifyOp(cont);
break;
case OP_CANCEL_UNBLOBBIFY:
randomCancelUnblobbifyOp(cont);
break;
case OP_CANCEL_PURGE:
randomCancelPurgeOp(cont);
break;
}
}
};
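All of the randomCancel*Op variants share one shape: create the future, register no continuation, and call ctx->done() immediately, so the fdb::Future handle goes out of scope while the request is still in flight and its destructor cancels it. A toy model of that RAII behavior, with a hypothetical stand-in for the future type:

#include <iostream>
#include <memory>

// Stand-in for fdb::Future: destroying the last handle tears down (and
// thereby cancels) the pending request, roughly what fdb_future_destroy does.
struct Future {
    std::shared_ptr<int> handle;
    Future() : handle(new int(0), [](int* p) {
        std::cout << "pending request cancelled\n";
        delete p;
    }) {}
};

int main() {
    {
        Future f; // issue the request
        // no continueAfter(): leaving this scope drops the only handle
    }
    std::cout << "operation completed without waiting on the future\n";
    return 0;
}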


@ -204,23 +204,23 @@ private:
void getRangeLoop(std::shared_ptr<ITransactionContext> ctx,
fdb::KeySelector begin,
fdb::KeySelector end,
fdb::Key endKey,
std::shared_ptr<std::vector<fdb::KeyValue>> results) {
auto f = ctx->tx().getRange(begin,
end,
fdb::key_select::firstGreaterOrEqual(endKey),
0 /*limit*/,
0 /*target_bytes*/,
FDB_STREAMING_MODE_WANT_ALL,
0 /*iteration*/,
false /*snapshot*/,
false /*reverse*/);
ctx->continueAfter(f, [this, ctx, f, end, results]() {
ctx->continueAfter(f, [this, ctx, f, endKey, results]() {
auto out = copyKeyValueArray(f.get());
results->insert(results->end(), out.first.begin(), out.first.end());
const bool more = out.second;
if (more) {
// Fetch the remaining results.
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), end, results);
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), endKey, results);
} else {
ctx->done();
}
@ -237,10 +237,7 @@ private:
// Clear the results vector, in case the transaction is retried.
results->clear();
getRangeLoop(ctx,
fdb::key_select::firstGreaterOrEqual(begin),
fdb::key_select::firstGreaterOrEqual(end),
results);
getRangeLoop(ctx, fdb::key_select::firstGreaterOrEqual(begin), end, results);
},
[this, begin, end, results, cont, tenantId]() {
auto expected = stores[tenantId].getRange(begin, end, results->size() + 10, false);
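The getRangeLoop change fixes a subtle pagination bug: the continuation used to capture the end KeySelector, whose offset/orEqual state described only the first request; carrying the plain endKey and rebuilding firstGreaterOrEqual(endKey) on every iteration keeps later pages correct. A hedged standalone model of the corrected loop:

#include <iostream>
#include <string>
#include <vector>

struct Page {
    std::vector<std::string> keys;
    bool more = false;
};

// Toy fetch: up to `limit` keys in [begin, end) from a sorted store,
// plus a "more" flag when the page was cut short.
Page fetch(const std::vector<std::string>& store,
           const std::string& begin, const std::string& end, size_t limit) {
    Page p;
    for (const auto& k : store) {
        if (k < begin || k >= end)
            continue;
        if (p.keys.size() == limit) {
            p.more = true;
            break;
        }
        p.keys.push_back(k);
    }
    return p;
}

// Carry the immutable endKey through the loop and restart just past the
// last returned key (the analogue of firstGreaterThan(results->back().key)).
void getRangeLoop(const std::vector<std::string>& store, std::string begin,
                  const std::string& endKey, std::vector<std::string>& out) {
    Page p = fetch(store, begin, endKey, 2);
    out.insert(out.end(), p.keys.begin(), p.keys.end());
    if (p.more)
        getRangeLoop(store, out.back() + '\0', endKey, out);
}

int main() {
    std::vector<std::string> store{ "a", "b", "c", "d" };
    std::vector<std::string> out;
    getRangeLoop(store, "a", "e", out);
    for (const auto& k : out)
        std::cout << k << "\n"; // a b c d, gathered across two pages
    return 0;
}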


@ -101,6 +101,10 @@ std::unordered_map<std::string, std::function<void(const std::string& value, Tes
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "maxClients", spec->maxClients, 1, 1000);
} },
{ "disableClientBypass",
[](const std::string& value, TestSpec* spec) { //
spec->disableClientBypass = (value == "true");
} },
{ "minTenants",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "minTenants", spec->minTenants, 1, 1000);


@ -78,6 +78,9 @@ struct TestSpec {
int minClients = 1;
int maxClients = 10;
// Disable the ability to bypass the MVC API, for
// cases when there are no external clients
bool disableClientBypass = false;
// Number of tenants (a random number in the [min,max] range)
int minTenants = 0;
int maxTenants = 0;


@ -40,11 +40,6 @@ namespace FdbApiTester {
constexpr int LONG_WAIT_TIME_US = 2000000;
constexpr int LARGE_NUMBER_OF_RETRIES = 10;
void TransactionActorBase::complete(fdb::Error err) {
error = err;
context = {};
}
void ITransactionContext::continueAfterAll(std::vector<fdb::Future> futures, TTaskFct cont) {
auto counter = std::make_shared<std::atomic<int>>(futures.size());
auto errorCode = std::make_shared<std::atomic<fdb::Error>>(fdb::Error::success());
@ -76,28 +71,31 @@ void ITransactionContext::continueAfterAll(std::vector<fdb::Future> futures, TTa
class TransactionContextBase : public ITransactionContext {
public:
TransactionContextBase(ITransactionExecutor* executor,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName)
: executor(executor), txActor(txActor), contAfterDone(cont), scheduler(scheduler), retryLimit(retryLimit),
txState(TxState::IN_PROGRESS), commitCalled(false), bgBasePath(bgBasePath), tenantName(tenantName) {
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: executor(executor), startFct(startFct), contAfterDone(cont), scheduler(scheduler), retryLimit(retryLimit),
txState(TxState::IN_PROGRESS), commitCalled(false), bgBasePath(bgBasePath), tenantName(tenantName),
transactional(transactional) {
databaseCreateErrorInjected = executor->getOptions().injectDatabaseCreateErrors &&
Random::get().randomBool(executor->getOptions().databaseCreateErrorRatio);
fdb::Database db;
if (databaseCreateErrorInjected) {
db = fdb::Database(executor->getClusterFileForErrorInjection());
fdbDb = fdb::Database(executor->getClusterFileForErrorInjection());
} else {
db = executor->selectDatabase();
fdbDb = executor->selectDatabase();
}
if (tenantName) {
fdb::Tenant tenant = db.openTenant(*tenantName);
fdbTx = tenant.createTransaction();
} else {
fdbTx = db.createTransaction();
if (transactional) {
if (tenantName) {
fdb::Tenant tenant = fdbDb.openTenant(*tenantName);
fdbTx = tenant.createTransaction();
} else {
fdbTx = fdbDb.createTransaction();
}
}
}
@ -107,6 +105,8 @@ public:
// IN_PROGRESS -> (ON_ERROR -> IN_PROGRESS)* [-> ON_ERROR] -> DONE
enum class TxState { IN_PROGRESS, ON_ERROR, DONE };
fdb::Database db() override { return fdbDb.atomic_load(); }
fdb::Transaction tx() override { return fdbTx.atomic_load(); }
// Set a continuation to be executed when a future gets ready
@ -116,6 +116,7 @@ public:
// Complete the transaction with a commit
void commit() override {
ASSERT(transactional);
std::unique_lock<std::mutex> lock(mutex);
if (txState != TxState::IN_PROGRESS) {
return;
@ -146,14 +147,14 @@ public:
fmt::join(retriedErrorCodes(), ", "));
}
// cancel transaction so that any pending operations on it
// fail gracefully
fdbTx.cancel();
txActor->complete(fdb::Error::success());
cleanUp();
if (transactional) {
// cancel transaction so that any pending operations on it
// fail gracefully
fdbTx.cancel();
cleanUp();
}
ASSERT(txState == TxState::DONE);
contAfterDone();
contAfterDone(fdb::Error::success());
}
std::string getBGBasePath() override { return bgBasePath; }
@ -179,20 +180,26 @@ public:
if (databaseCreateErrorInjected && canBeInjectedDatabaseCreateError(err.code())) {
// Failed to create a database because of failure injection
// Restart by recreating the transaction in a valid database
scheduler->schedule([this]() {
fdb::Database db = executor->selectDatabase();
if (tenantName) {
fdb::Tenant tenant = db.openTenant(*tenantName);
fdbTx.atomic_store(tenant.createTransaction());
} else {
fdbTx.atomic_store(db.createTransaction());
auto thisRef = std::static_pointer_cast<TransactionContextBase>(shared_from_this());
scheduler->schedule([thisRef]() {
fdb::Database db = thisRef->executor->selectDatabase();
thisRef->fdbDb.atomic_store(db);
if (thisRef->transactional) {
if (thisRef->tenantName) {
fdb::Tenant tenant = db.openTenant(*thisRef->tenantName);
thisRef->fdbTx.atomic_store(tenant.createTransaction());
} else {
thisRef->fdbTx.atomic_store(db.createTransaction());
}
}
restartTransaction();
thisRef->restartTransaction();
});
} else {
} else if (transactional) {
onErrorArg = err;
onErrorFuture = tx().onError(err);
handleOnErrorFuture();
} else {
transactionFailed(err);
}
}
@ -207,7 +214,6 @@ protected:
void cleanUp() {
ASSERT(txState == TxState::DONE);
ASSERT(!onErrorFuture);
txActor = {};
cancelPendingFutures();
}
@ -230,9 +236,8 @@ protected:
// No need for lock from here on, because only one thread
// can enter DONE state and handle it
txActor->complete(err);
cleanUp();
contAfterDone();
contAfterDone(err);
}
// Handle result of a transaction onError call
@ -254,7 +259,7 @@ protected:
txState = TxState::IN_PROGRESS;
commitCalled = false;
lock.unlock();
txActor->start();
startFct(shared_from_this());
}
// Checks if a transaction can be retried. Fails the transaction if the check fails
@ -286,13 +291,17 @@ protected:
// Set in constructor, stays immutable
ITransactionExecutor* const executor;
// FDB database
// Provides a thread safe interface by itself (no need for mutex)
fdb::Database fdbDb;
// FDB transaction
// Provides a thread safe interface by itself (no need for mutex)
fdb::Transaction fdbTx;
// Actor implementing the transaction workflow
// The function implementing the starting point of the transaction
// Set in constructor and reset on cleanup (no need for mutex)
std::shared_ptr<ITransactionActor> txActor;
TOpStartFct startFct;
// Mutex protecting access to shared mutable state
// Only the state that is accessible under the IN_PROGRESS state
@ -301,7 +310,7 @@ protected:
// Continuation to be called after completion of the transaction
// Set in constructor, stays immutable
const TTaskFct contAfterDone;
const TOpContFct contAfterDone;
// Reference to the scheduler
// Set in constructor, stays immutable
@ -346,6 +355,9 @@ protected:
// The tenant that we will run this transaction in
const std::optional<fdb::BytesRef> tenantName;
// Specifies whether the operation is transactional
const bool transactional;
};
/**
@ -354,13 +366,15 @@ protected:
class BlockingTransactionContext : public TransactionContextBase {
public:
BlockingTransactionContext(ITransactionExecutor* executor,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName)
: TransactionContextBase(executor, txActor, cont, scheduler, retryLimit, bgBasePath, tenantName) {}
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: TransactionContextBase(executor, startFct, cont, scheduler, retryLimit, bgBasePath, tenantName, transactional) {
}
protected:
void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -430,13 +444,15 @@ protected:
class AsyncTransactionContext : public TransactionContextBase {
public:
AsyncTransactionContext(ITransactionExecutor* executor,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName)
: TransactionContextBase(executor, txActor, cont, scheduler, retryLimit, bgBasePath, tenantName) {}
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: TransactionContextBase(executor, startFct, cont, scheduler, retryLimit, bgBasePath, tenantName, transactional) {
}
protected:
void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -648,23 +664,22 @@ public:
const TransactionExecutorOptions& getOptions() override { return options; }
void execute(std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
std::optional<fdb::BytesRef> tenantName = {}) override {
void execute(TOpStartFct startFct,
TOpContFct cont,
std::optional<fdb::BytesRef> tenantName,
bool transactional) override {
try {
std::shared_ptr<ITransactionContext> ctx;
if (options.blockOnFutures) {
ctx = std::make_shared<BlockingTransactionContext>(
this, txActor, cont, scheduler, options.transactionRetryLimit, bgBasePath, tenantName);
this, startFct, cont, scheduler, options.transactionRetryLimit, bgBasePath, tenantName, true);
} else {
ctx = std::make_shared<AsyncTransactionContext>(
this, txActor, cont, scheduler, options.transactionRetryLimit, bgBasePath, tenantName);
this, startFct, cont, scheduler, options.transactionRetryLimit, bgBasePath, tenantName, true);
}
txActor->init(ctx);
txActor->start();
startFct(ctx);
} catch (...) {
txActor->complete(fdb::Error(error_code_operation_failed));
cont();
cont(fdb::Error(error_code_operation_failed));
}
}
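This refactor is the core of the executor change: the ITransactionActor objects are replaced by a start lambda (re-invoked on every retry) plus a completion continuation that now receives the final fdb::Error directly. A hedged minimal model of the new control flow, using hypothetical stand-in types:

#include <functional>
#include <iostream>
#include <memory>

struct Error {
    int code = 0;
};
struct Context;
using TOpStartFct = std::function<void(std::shared_ptr<Context>)>;
using TOpContFct = std::function<void(Error)>;

// Stand-in for TransactionContextBase: it owns the start lambda so the
// operation can be re-run on retry, and reports the outcome through the
// continuation instead of storing it on an actor object.
struct Context : std::enable_shared_from_this<Context> {
    TOpStartFct startFct;
    TOpContFct contAfterDone;
    void restart() { startFct(shared_from_this()); } // retry path
    void done() { contAfterDone(Error{ 0 }); }
};

int main() {
    auto ctx = std::make_shared<Context>();
    ctx->startFct = [](std::shared_ptr<Context> c) { c->done(); };
    ctx->contAfterDone = [](Error e) { std::cout << "finished, code " << e.code << "\n"; };
    ctx->restart();
    return 0;
}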

View File

@ -38,6 +38,9 @@ class ITransactionContext : public std::enable_shared_from_this<ITransactionCont
public:
virtual ~ITransactionContext() {}
// Current FDB database
virtual fdb::Database db() = 0;
// Current FDB transaction
virtual fdb::Transaction tx() = 0;
@ -62,57 +65,11 @@ public:
virtual void continueAfterAll(std::vector<fdb::Future> futures, TTaskFct cont);
};
/**
* Interface of an actor object implementing a concrete transaction
*/
class ITransactionActor {
public:
virtual ~ITransactionActor() {}
// Type of the lambda functions implementing a database operation
using TOpStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
// Initialize with the given transaction context
virtual void init(std::shared_ptr<ITransactionContext> ctx) = 0;
// Start execution of the transaction, also called on retries
virtual void start() = 0;
// Transaction completion result (error_code_success in case of success)
virtual fdb::Error getError() = 0;
// Notification about the completion of the transaction
virtual void complete(fdb::Error err) = 0;
};
/**
* A helper base class for transaction actors
*/
class TransactionActorBase : public ITransactionActor {
public:
void init(std::shared_ptr<ITransactionContext> ctx) override { context = ctx; }
fdb::Error getError() override { return error; }
void complete(fdb::Error err) override;
protected:
std::shared_ptr<ITransactionContext> ctx() { return context; }
private:
std::shared_ptr<ITransactionContext> context;
fdb::Error error = fdb::Error::success();
};
// Type of the lambda functions implementing a transaction
using TTxStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
/**
* A wrapper class for transactions implemented by lambda functions
*/
class TransactionFct : public TransactionActorBase {
public:
TransactionFct(TTxStartFct startFct) : startFct(startFct) {}
void start() override { startFct(this->ctx()); }
private:
TTxStartFct startFct;
};
// Type of the lambda functions implementing a continuation after a database operation
using TOpContFct = std::function<void(fdb::Error)>;
/**
* Configuration of transaction execution mode
@ -156,9 +113,10 @@ class ITransactionExecutor {
public:
virtual ~ITransactionExecutor() {}
virtual void init(IScheduler* sched, const char* clusterFile, const std::string& bgBasePath) = 0;
virtual void execute(std::shared_ptr<ITransactionActor> tx,
TTaskFct cont,
std::optional<fdb::BytesRef> tenantName = {}) = 0;
virtual void execute(TOpStartFct start,
TOpContFct cont,
std::optional<fdb::BytesRef> tenantName,
bool transactional) = 0;
virtual fdb::Database selectDatabase() = 0;
virtual std::string getClusterFileForErrorInjection() = 0;
virtual const TransactionExecutorOptions& getOptions() = 0;


@ -106,10 +106,23 @@ void WorkloadBase::schedule(TTaskFct task) {
});
}
void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx,
void WorkloadBase::execTransaction(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError) {
doExecute(startFct, cont, tenant, failOnError, true);
}
// Execute a non-transactional database operation within the workload
void WorkloadBase::execOperation(TOpStartFct startFct, TTaskFct cont, bool failOnError) {
doExecute(startFct, cont, {}, failOnError, false);
}
void WorkloadBase::doExecute(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError,
bool transactional) {
ASSERT(inProgress);
if (failed) {
return;
@ -117,10 +130,9 @@ void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx,
tasksScheduled++;
numTxStarted++;
manager->txExecutor->execute(
tx,
[this, tx, cont, failOnError]() {
startFct,
[this, startFct, cont, failOnError](fdb::Error err) {
numTxCompleted++;
fdb::Error err = tx->getError();
if (err.code() == error_code_success) {
cont();
} else {
@ -135,7 +147,8 @@ void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx,
}
scheduledTaskDone();
},
tenant);
tenant,
transactional);
}
void WorkloadBase::info(const std::string& msg) {


@ -119,18 +119,13 @@ protected:
void schedule(TTaskFct task);
// Execute a transaction within the workload
void execTransaction(std::shared_ptr<ITransactionActor> tx,
void execTransaction(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant = std::optional<fdb::BytesRef>(),
bool failOnError = true);
// Execute a transaction within the workload, a convenience method for a transaction defined by a lambda function
void execTransaction(TTxStartFct start,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant = std::optional<fdb::BytesRef>(),
bool failOnError = true) {
execTransaction(std::make_shared<TransactionFct>(start), cont, tenant, failOnError);
}
// Execute a non-transactional database operation within the workload
void execOperation(TOpStartFct startFct, TTaskFct cont, bool failOnError = true);
// Log an error message, increase error counter
void error(const std::string& msg);
@ -144,6 +139,12 @@ protected:
private:
WorkloadManager* manager;
void doExecute(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError,
bool transactional);
// Decrease scheduled task counter, notify the workload manager
// that the task is done if no more tasks are scheduled
void scheduledTaskDone();


@ -322,6 +322,10 @@ void applyNetworkOptions(TesterOptions& options) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE);
}
if (options.testSpec.disableClientBypass && options.apiVersion >= 720) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_DISABLE_CLIENT_BYPASS);
}
if (options.trace) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, options.traceDir);
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_FORMAT, options.traceFormat);
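For reference, the same option through the raw C API; a sketch assuming API version 720. DISABLE_CLIENT_BYPASS takes no argument, so the value pointer is null:

#define FDB_API_VERSION 720
#include <foundationdb/fdb_c.h>

// Must run after fdb_select_api_version() and before fdb_setup_network().
fdb_error_t disableClientBypass() {
    return fdb_network_set_option(FDB_NET_OPTION_DISABLE_CLIENT_BYPASS, nullptr, 0);
}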


@ -0,0 +1,29 @@
[[test]]
title = 'API Correctness Single Threaded'
minClients = 1
maxClients = 3
minDatabases = 1
maxDatabases = 3
multiThreaded = false
disableClientBypass = true
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100
[[test.workload]]
name = 'WatchAndWait'
initialSize = 0
numRandomOperations = 10


@ -155,6 +155,13 @@ struct None {
struct Type {};
static Error extract(native::FDBFuture*, Type&) noexcept { return Error(0); }
};
struct Bool {
using Type = native::fdb_bool_t;
static Error extract(native::FDBFuture* f, Type& out) noexcept {
auto err = native::fdb_future_get_bool(f, &out);
return Error(err);
}
};
struct Int64 {
using Type = int64_t;
static Error extract(native::FDBFuture* f, Type& out) noexcept {
@ -349,6 +356,7 @@ public:
class Future {
protected:
friend class Transaction;
friend class Database;
friend std::hash<Future>;
std::shared_ptr<native::FDBFuture> f;
@ -718,6 +726,14 @@ public:
}
Database() noexcept : db(nullptr) {}
void atomic_store(Database other) { std::atomic_store(&db, other.db); }
Database atomic_load() {
Database retVal;
retVal.db = std::atomic_load(&db);
return retVal;
}
Error setOptionNothrow(FDBDatabaseOption option, int64_t value) noexcept {
return Error(native::fdb_database_set_option(
db.get(), option, reinterpret_cast<const uint8_t*>(&value), static_cast<int>(sizeof(value))));
@ -763,6 +779,46 @@ public:
throwError("Failed to create transaction: ", err);
return Transaction(tx_native);
}
TypedFuture<future_var::KeyRangeRefArray> listBlobbifiedRanges(KeyRef begin, KeyRef end, int rangeLimit) {
if (!db)
throw std::runtime_error("listBlobbifiedRanges from null database");
return native::fdb_database_list_blobbified_ranges(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), rangeLimit);
}
TypedFuture<future_var::Int64> verifyBlobRange(KeyRef begin, KeyRef end, int64_t version) {
if (!db)
throw std::runtime_error("verifyBlobRange from null database");
return native::fdb_database_verify_blob_range(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), version);
}
TypedFuture<future_var::Bool> blobbifyRange(KeyRef begin, KeyRef end) {
if (!db)
throw std::runtime_error("blobbifyRange from null database");
return native::fdb_database_blobbify_range(db.get(), begin.data(), intSize(begin), end.data(), intSize(end));
}
TypedFuture<future_var::Bool> unblobbifyRange(KeyRef begin, KeyRef end) {
if (!db)
throw std::runtime_error("unblobbifyRange from null database");
return native::fdb_database_unblobbify_range(db.get(), begin.data(), intSize(begin), end.data(), intSize(end));
}
TypedFuture<future_var::KeyRef> purgeBlobGranules(KeyRef begin, KeyRef end, int64_t version, bool force) {
if (!db)
throw std::runtime_error("purgeBlobGranules from null database");
native::fdb_bool_t forceBool = force;
return native::fdb_database_purge_blob_granules(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), version, forceBool);
}
TypedFuture<future_var::None> waitPurgeGranulesComplete(KeyRef purgeKey) {
if (!db)
throw std::runtime_error("waitPurgeGranulesComplete from null database");
return native::fdb_database_wait_purge_granules_complete(db.get(), purgeKey.data(), intSize(purgeKey));
}
};
inline Error selectApiVersionNothrow(int version) {
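A hedged usage sketch of the C calls these new wrappers sit on (the fdb.hpp methods above are thin shims over them), assuming the network is already running and db is an open database; following the tester's convention, -2 requests the latest version and a verify result of -1 means the range is not yet readable:

#define FDB_API_VERSION 720
#include <foundationdb/fdb_c.h>

bool blobbifyAndVerify(FDBDatabase* db,
                       const uint8_t* begin, int beginLen,
                       const uint8_t* end, int endLen) {
    // Register the range for blob granules; the future resolves to a bool.
    FDBFuture* f = fdb_database_blobbify_range(db, begin, beginLen, end, endLen);
    fdb_future_block_until_ready(f);
    fdb_bool_t ok = 0;
    fdb_error_t err = fdb_future_get_bool(f, &ok);
    fdb_future_destroy(f);
    if (err || !ok)
        return false;
    // Verify readability at the latest version (-2).
    FDBFuture* v = fdb_database_verify_blob_range(db, begin, beginLen, end, endLen, -2);
    fdb_future_block_until_ready(v);
    int64_t readVersion = -1;
    err = fdb_future_get_int64(v, &readVersion);
    fdb_future_destroy(v);
    return !err && readVersion != -1;
}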


@ -23,17 +23,17 @@
namespace FDB {
const uint8_t DirectoryLayer::LITTLE_ENDIAN_LONG_ONE[8] = { 1, 0, 0, 0, 0, 0, 0, 0 };
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = LiteralStringRef("hca");
const StringRef DirectoryLayer::LAYER_KEY = LiteralStringRef("layer");
const StringRef DirectoryLayer::VERSION_KEY = LiteralStringRef("version");
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = "hca"_sr;
const StringRef DirectoryLayer::LAYER_KEY = "layer"_sr;
const StringRef DirectoryLayer::VERSION_KEY = "version"_sr;
const int64_t DirectoryLayer::SUB_DIR_KEY = 0;
const uint32_t DirectoryLayer::VERSION[3] = { 1, 0, 0 };
const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = LiteralStringRef("\xfe");
const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = "\xfe"_sr;
const Subspace DirectoryLayer::DEFAULT_NODE_SUBSPACE = Subspace(DEFAULT_NODE_SUBSPACE_PREFIX);
const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");
const StringRef DirectoryLayer::PARTITION_LAYER = "partition"_sr;
DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes)
: rootNode(nodeSubspace.get(nodeSubspace.key())), nodeSubspace(nodeSubspace), contentSubspace(contentSubspace),
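The change here and through the following files is mechanical: LiteralStringRef("x") becomes the "x"_sr user-defined literal. A hedged sketch of what the literal amounts to (flow's real StringRef and operator differ in detail):

#include <cstddef>
#include <cstdint>
#include <iostream>

struct StringRef {
    const uint8_t* data;
    size_t size;
};

// The length comes from the literal itself, so embedded NULs survive,
// unlike a strlen()-based conversion.
inline StringRef operator""_sr(const char* str, size_t len) {
    return StringRef{ reinterpret_cast<const uint8_t*>(str), len };
}

int main() {
    auto layer = "layer"_sr; // replaces LiteralStringRef("layer")
    auto nuls = "\x00\x01"_sr;
    std::cout << layer.size << " " << nuls.size << "\n"; // prints: 5 2
    return 0;
}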


@ -31,7 +31,7 @@ typedef Standalone<KeyRef> Key;
typedef Standalone<ValueRef> Value;
inline Key keyAfter(const KeyRef& key) {
if (key == LiteralStringRef("\xff\xff"))
if (key == "\xff\xff"_sr)
return key;
Standalone<StringRef> r;
@ -43,7 +43,7 @@ inline Key keyAfter(const KeyRef& key) {
}
inline KeyRef keyAfter(const KeyRef& key, Arena& arena) {
if (key == LiteralStringRef("\xff\xff"))
if (key == "\xff\xff"_sr)
return key;
uint8_t* t = new (arena) uint8_t[key.size() + 1];
memcpy(t, key.begin(), key.size());


@ -63,15 +63,14 @@ ACTOR Future<Void> _test() {
// wait( waitForAllReady( versions ) );
printf("Elapsed: %lf\n", timer_monotonic() - starttime);
tr->set(LiteralStringRef("foo"), LiteralStringRef("bar"));
tr->set("foo"_sr, "bar"_sr);
Optional<FDBStandalone<ValueRef>> v = wait(tr->get(LiteralStringRef("foo")));
Optional<FDBStandalone<ValueRef>> v = wait(tr->get("foo"_sr));
if (v.present()) {
printf("%s\n", v.get().toString().c_str());
}
FDBStandalone<RangeResultRef> r =
wait(tr->getRange(KeyRangeRef(LiteralStringRef("a"), LiteralStringRef("z")), 100));
FDBStandalone<RangeResultRef> r = wait(tr->getRange(KeyRangeRef("a"_sr, "z"_sr), 100));
for (auto kv : r) {
printf("%s is %s\n", kv.key.toString().c_str(), kv.value.toString().c_str());


@ -545,11 +545,10 @@ struct DirectoryLogDirectoryFunc : InstructionFunc {
pathTuple.append(p, true);
}
instruction->tr->set(logSubspace.pack(LiteralStringRef("path"), true), pathTuple.pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("layer"), true),
Tuple().append(directory->getLayer()).pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("exists"), true), Tuple().append(exists ? 1 : 0).pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("children"), true), childrenTuple.pack());
instruction->tr->set(logSubspace.pack("path"_sr, true), pathTuple.pack());
instruction->tr->set(logSubspace.pack("layer"_sr, true), Tuple().append(directory->getLayer()).pack());
instruction->tr->set(logSubspace.pack("exists"_sr, true), Tuple().append(exists ? 1 : 0).pack());
instruction->tr->set(logSubspace.pack("children"_sr, true), childrenTuple.pack());
return Void();
}


@ -470,12 +470,12 @@ ACTOR Future<Standalone<StringRef>> waitForVoid(Future<Void> f) {
try {
wait(f);
Tuple t;
t.append(LiteralStringRef("RESULT_NOT_PRESENT"));
t.append("RESULT_NOT_PRESENT"_sr);
return t.pack();
} catch (Error& e) {
// printf("FDBError1:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -493,7 +493,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<FDBStandalone<KeyRef>> f
} catch (Error& e) {
// printf("FDBError2:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -509,7 +509,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<Optional<FDBStandalone<V
if (value.present())
str = value.get();
else
str = LiteralStringRef("RESULT_NOT_PRESENT");
str = "RESULT_NOT_PRESENT"_sr;
Tuple t;
t.append(str);
@ -517,7 +517,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<Optional<FDBStandalone<V
} catch (Error& e) {
// printf("FDBError3:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -543,7 +543,7 @@ ACTOR Future<Standalone<StringRef>> getKey(Future<FDBStandalone<KeyRef>> f, Stan
} catch (Error& e) {
// printf("FDBError4:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -670,7 +670,7 @@ struct GetEstimatedRangeSize : InstructionFunc {
state Standalone<StringRef> endKey = Tuple::unpack(s2).getString(0);
Future<int64_t> fsize = instruction->tr->getEstimatedRangeSizeBytes(KeyRangeRef(beginKey, endKey));
int64_t size = wait(fsize);
data->stack.pushTuple(LiteralStringRef("GOT_ESTIMATED_RANGE_SIZE"));
data->stack.pushTuple("GOT_ESTIMATED_RANGE_SIZE"_sr);
return Void();
}
@ -698,7 +698,7 @@ struct GetRangeSplitPoints : InstructionFunc {
Future<FDBStandalone<VectorRef<KeyRef>>> fsplitPoints =
instruction->tr->getRangeSplitPoints(KeyRangeRef(beginKey, endKey), chunkSize);
FDBStandalone<VectorRef<KeyRef>> splitPoints = wait(fsplitPoints);
data->stack.pushTuple(LiteralStringRef("GOT_RANGE_SPLIT_POINTS"));
data->stack.pushTuple("GOT_RANGE_SPLIT_POINTS"_sr);
return Void();
}
@ -743,7 +743,7 @@ struct GetReadVersionFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
Version v = wait(instruction->tr->getReadVersion());
data->lastVersion = v;
data->stack.pushTuple(LiteralStringRef("GOT_READ_VERSION"));
data->stack.pushTuple("GOT_READ_VERSION"_sr);
return Void();
}
};
@ -767,7 +767,7 @@ struct GetCommittedVersionFunc : InstructionFunc {
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
data->lastVersion = instruction->tr->getCommittedVersion();
data->stack.pushTuple(LiteralStringRef("GOT_COMMITTED_VERSION"));
data->stack.pushTuple("GOT_COMMITTED_VERSION"_sr);
return Void();
}
};
@ -781,7 +781,7 @@ struct GetApproximateSizeFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
int64_t _ = wait(instruction->tr->getApproximateSize());
(void)_; // disable unused variable warning
data->stack.pushTuple(LiteralStringRef("GOT_APPROXIMATE_SIZE"));
data->stack.pushTuple("GOT_APPROXIMATE_SIZE"_sr);
return Void();
}
};
@ -1485,7 +1485,7 @@ struct ReadConflictKeyFunc : InstructionFunc {
// printf("=========READ_CONFLICT_KEY:%s\n", printable(key).c_str());
instruction->tr->addReadConflictKey(key);
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_KEY"));
data->stack.pushTuple("SET_CONFLICT_KEY"_sr);
return Void();
}
};
@ -1506,7 +1506,7 @@ struct WriteConflictKeyFunc : InstructionFunc {
// printf("=========WRITE_CONFLICT_KEY:%s\n", printable(key).c_str());
instruction->tr->addWriteConflictKey(key);
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_KEY"));
data->stack.pushTuple("SET_CONFLICT_KEY"_sr);
return Void();
}
};
@ -1529,7 +1529,7 @@ struct ReadConflictRangeFunc : InstructionFunc {
// printf("=========READ_CONFLICT_RANGE:%s:%s\n", printable(begin).c_str(), printable(end).c_str());
instruction->tr->addReadConflictRange(KeyRange(KeyRangeRef(begin, end)));
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_RANGE"));
data->stack.pushTuple("SET_CONFLICT_RANGE"_sr);
return Void();
}
};
@ -1553,7 +1553,7 @@ struct WriteConflictRangeFunc : InstructionFunc {
// printf("=========WRITE_CONFLICT_RANGE:%s:%s\n", printable(begin).c_str(), printable(end).c_str());
instruction->tr->addWriteConflictRange(KeyRange(KeyRangeRef(begin, end)));
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_RANGE"));
data->stack.pushTuple("SET_CONFLICT_RANGE"_sr);
return Void();
}
};
@ -1643,10 +1643,8 @@ struct UnitTestsFunc : InstructionFunc {
Optional<StringRef>(StringRef((const uint8_t*)&locationCacheSize, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MAX_WATCHES,
Optional<StringRef>(StringRef((const uint8_t*)&maxWatches, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_DATACENTER_ID,
Optional<StringRef>(LiteralStringRef("dc_id")));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MACHINE_ID,
Optional<StringRef>(LiteralStringRef("machine_id")));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_DATACENTER_ID, Optional<StringRef>("dc_id"_sr));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MACHINE_ID, Optional<StringRef>("machine_id"_sr));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_SNAPSHOT_RYW_ENABLE);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_LOGGING_MAX_FIELD_LENGTH,
@ -1685,13 +1683,13 @@ struct UnitTestsFunc : InstructionFunc {
Optional<StringRef>(StringRef((const uint8_t*)&maxRetryDelay, 8)));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_USED_DURING_COMMIT_PROTECTION_DISABLE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_TRANSACTION_LOGGING_ENABLE,
Optional<StringRef>(LiteralStringRef("my_transaction")));
Optional<StringRef>("my_transaction"_sr));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_READ_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_INCLUDE_PORT_IN_ADDRESS);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_REPORT_CONFLICTING_KEYS);
Optional<FDBStandalone<ValueRef>> _ = wait(tr->get(LiteralStringRef("\xff")));
Optional<FDBStandalone<ValueRef>> _ = wait(tr->get("\xff"_sr));
tr->cancel();
return Void();
@ -1724,13 +1722,13 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
Tuple opTuple = Tuple::unpack(data->instructions[idx].value);
state Standalone<StringRef> op = opTuple.getString(0);
state bool isDatabase = op.endsWith(LiteralStringRef("_DATABASE"));
state bool isSnapshot = op.endsWith(LiteralStringRef("_SNAPSHOT"));
state bool isDirectory = op.startsWith(LiteralStringRef("DIRECTORY_"));
state bool isDatabase = op.endsWith("_DATABASE"_sr);
state bool isSnapshot = op.endsWith("_SNAPSHOT"_sr);
state bool isDirectory = op.startsWith("DIRECTORY_"_sr);
try {
if (LOG_INSTRUCTIONS) {
if (op != LiteralStringRef("SWAP") && op != LiteralStringRef("PUSH")) {
if (op != "SWAP"_sr && op != "PUSH"_sr) {
printf("%zu. %s\n", idx, tupleToString(opTuple).c_str());
fflush(stdout);
}
@ -1773,7 +1771,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
if (opsThatCreateDirectories.count(op.toString())) {
data->directoryData.directoryList.push_back(DirectoryOrSubspace());
}
data->stack.pushTuple(LiteralStringRef("DIRECTORY_ERROR"));
data->stack.pushTuple("DIRECTORY_ERROR"_sr);
} else {
data->stack.pushError(e.code());
}
@ -1883,15 +1881,14 @@ ACTOR void _test_versionstamp() {
state Future<FDBStandalone<StringRef>> ftrVersion = tr->getVersionstamp();
tr->atomicOp(LiteralStringRef("foo"),
LiteralStringRef("blahblahbl\x00\x00\x00\x00"),
FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
tr->atomicOp(
"foo"_sr, "blahblahbl\x00\x00\x00\x00"_sr, FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
wait(tr->commit()); // should use retry loop
tr->reset();
Optional<FDBStandalone<StringRef>> optionalDbVersion = wait(tr->get(LiteralStringRef("foo")));
Optional<FDBStandalone<StringRef>> optionalDbVersion = wait(tr->get("foo"_sr));
state FDBStandalone<StringRef> dbVersion = optionalDbVersion.get();
FDBStandalone<StringRef> trVersion = wait(ftrVersion);


@ -71,7 +71,7 @@ struct FlowTesterStack {
void pushError(int errorCode) {
FDB::Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", errorCode));
// pack above as error string into another tuple
pushTuple(t.pack().toString());


@ -266,6 +266,11 @@ func (o NetworkOptions) SetEnableRunLoopProfiling() error {
return o.setOpt(71, nil)
}
// Prevents the multi-version client API from being disabled, even if no external clients are configured. This option is required to use GRV caching.
func (o NetworkOptions) SetDisableClientBypass() error {
return o.setOpt(72, nil)
}
// Enable client buggify - will make requests randomly fail (intended for client testing)
func (o NetworkOptions) SetClientBuggifyEnable() error {
return o.setOpt(80, nil)
@ -622,7 +627,7 @@ func (o TransactionOptions) SetBypassUnreadable() error {
return o.setOpt(1100, nil)
}
// Allows this transaction to use cached GRV from the database context. Defaults to off. Upon first usage, starts a background updater to periodically update the cache to avoid stale read versions.
// Allows this transaction to use cached GRV from the database context. Defaults to off. Upon first usage, starts a background updater to periodically update the cache to avoid stale read versions. The disable_client_bypass option must also be set.
func (o TransactionOptions) SetUseGrvCache() error {
return o.setOpt(1101, nil)
}


@ -1037,7 +1037,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBDatabase_Database_1verify
return 0;
}
FDBFuture* f = fdb_database_list_blobbified_ranges(
FDBFuture* f = fdb_database_verify_blob_range(
tr, startKey, jenv->GetArrayLength(beginKeyBytes), endKey, jenv->GetArrayLength(endKeyBytes), version);
jenv->ReleaseByteArrayElements(beginKeyBytes, (jbyte*)startKey, JNI_ABORT);
jenv->ReleaseByteArrayElements(endKeyBytes, (jbyte*)endKey, JNI_ABORT);


@ -161,6 +161,19 @@ public interface Database extends AutoCloseable, TransactionContext {
*/
double getMainThreadBusyness();
/**
* Runs {@link #purgeBlobGranules(Function)} on the default executor.
*
* @param beginKey start of the key range
* @param endKey end of the key range
* @param force if true, delete all data; otherwise keep data at versions >= the purge version
*
* @return the key to watch for purge complete
*/
default CompletableFuture<byte[]> purgeBlobGranules(byte[] beginKey, byte[] endKey, boolean force) {
return purgeBlobGranules(beginKey, endKey, -2, force, getExecutor());
}
/**
* Runs {@link #purgeBlobGranules(Function)} on the default executor.
*
@ -278,6 +291,18 @@ public interface Database extends AutoCloseable, TransactionContext {
*/
CompletableFuture<KeyRangeArrayResult> listBlobbifiedRanges(byte[] beginKey, byte[] endKey, int rangeLimit, Executor e);
/**
* Runs {@link #verifyBlobRange(Function)} on the default executor.
*
* @param beginKey start of the key range
* @param endKey end of the key range
*
* @return a future with the version of the last blob granule.
*/
default CompletableFuture<Long> verifyBlobRange(byte[] beginKey, byte[] endKey) {
return verifyBlobRange(beginKey, endKey, -2, getExecutor());
}
/**
* Runs {@link #verifyBlobRange(Function)} on the default executor.
*


@ -75,38 +75,3 @@ add_custom_command(OUTPUT ${package_file}
add_custom_target(python_package DEPENDS ${package_file})
add_dependencies(python_package python_binding)
add_dependencies(packages python_package)
if (NOT WIN32 AND NOT OPEN_FOR_IDE)
add_fdbclient_test(
NAME single_process_fdbcli_tests
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
)
add_fdbclient_test(
NAME multi_process_fdbcli_tests
PROCESS_NUMBER 5
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
5
)
if (TARGET external_client) # external_client copies fdb_c to bindings/c/libfdb_c_external.so
add_fdbclient_test(
NAME single_process_external_client_fdbcli_tests
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c_external.so
)
add_fdbclient_test(
NAME multi_process_external_client_fdbcli_tests
PROCESS_NUMBER 5
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
5
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c_external.so
)
endif()
endif()


@ -1359,7 +1359,7 @@ else:
except:
# The system python on OS X can't find the library installed to /usr/local/lib if SIP is enabled
# find_library does find the location in /usr/local/lib, so if the above fails fallback to using it
lib_path = ctypes.util.find_library(capi_name)
lib_path = ctypes.util.find_library("fdb_c")
if lib_path is not None:
try:
_capi = ctypes.CDLL(lib_path)


@ -57,19 +57,27 @@ function(compile_boost)
# Build boost
include(ExternalProject)
set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
ExternalProject_add("${COMPILE_BOOST_TARGET}Project"
URL "https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.tar.bz2"
URL_HASH SHA256=8681f175d4bdb26c52222665793eef08490d7758529330f98d3b29dd0735bccc
CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET}
BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
BUILD_IN_SOURCE ON
INSTALL_COMMAND ""
UPDATE_COMMAND ""
BUILD_BYPRODUCTS "${BOOST_INSTALL_DIR}/boost/config.hpp"
"${BOOST_INSTALL_DIR}/lib/libboost_context.a"
"${BOOST_INSTALL_DIR}/lib/libboost_filesystem.a"
"${BOOST_INSTALL_DIR}/lib/libboost_iostreams.a")
URL "https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.tar.bz2"
URL_HASH SHA256=8681f175d4bdb26c52222665793eef08490d7758529330f98d3b29dd0735bccc
CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND}
${BOOTSTRAP_ARGS}
--with-libraries=${BOOTSTRAP_LIBRARIES}
--with-toolset=${BOOST_TOOLSET}
BUILD_COMMAND ${B2_COMMAND}
link=static
${COMPILE_BOOST_BUILD_ARGS}
--prefix=${BOOST_INSTALL_DIR}
${USER_CONFIG_FLAG} install
BUILD_IN_SOURCE ON
INSTALL_COMMAND ""
UPDATE_COMMAND ""
BUILD_BYPRODUCTS "${BOOST_INSTALL_DIR}/boost/config.hpp"
"${BOOST_INSTALL_DIR}/lib/libboost_context.a"
"${BOOST_INSTALL_DIR}/lib/libboost_filesystem.a"
"${BOOST_INSTALL_DIR}/lib/libboost_iostreams.a")
add_library(${COMPILE_BOOST_TARGET}_context STATIC IMPORTED)
add_dependencies(${COMPILE_BOOST_TARGET}_context ${COMPILE_BOOST_TARGET}Project)


@ -4,31 +4,42 @@ find_package(RocksDB 6.27.3)
include(ExternalProject)
if (RocksDB_FOUND)
set(RocksDB_CMAKE_ARGS
-DUSE_RTTI=1
-DPORTABLE=${PORTABLE_ROCKSDB}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_SHARED_LINKER_FLAGS=${CMAKE_SHARED_LINKER_FLAGS}
-DCMAKE_STATIC_LINKER_FLAGS=${CMAKE_STATIC_LINKER_FLAGS}
-DCMAKE_EXE_LINKER_FLAGS=${CMAKE_EXE_LINKER_FLAGS}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DFAIL_ON_WARNINGS=OFF
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_BZ2=OFF
-DWITH_LZ4=ON
-DWITH_SNAPPY=OFF
-DWITH_ZLIB=OFF
-DWITH_ZSTD=OFF
-DWITH_LIBURING=${WITH_LIBURING}
-DWITH_TSAN=${USE_TSAN}
-DWITH_ASAN=${USE_ASAN}
-DWITH_UBSAN=${USE_UBSAN}
-DROCKSDB_BUILD_SHARED=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=True
)
if(ROCKSDB_FOUND)
ExternalProject_Add(rocksdb
SOURCE_DIR "${RocksDB_ROOT}"
DOWNLOAD_COMMAND ""
CMAKE_ARGS -DUSE_RTTI=1 -DPORTABLE=${PORTABLE_ROCKSDB}
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DFAIL_ON_WARNINGS=OFF
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_BZ2=OFF
-DWITH_LZ4=ON
-DWITH_SNAPPY=OFF
-DWITH_ZLIB=OFF
-DWITH_ZSTD=OFF
-DWITH_LIBURING=${WITH_LIBURING}
-DWITH_TSAN=${USE_TSAN}
-DWITH_ASAN=${USE_ASAN}
-DWITH_UBSAN=${USE_UBSAN}
-DROCKSDB_BUILD_SHARED=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=True
CMAKE_ARGS ${RocksDB_CMAKE_ARGS}
BUILD_BYPRODUCTS <BINARY_DIR>/librocksdb.a
INSTALL_COMMAND ""
)
@ -38,29 +49,9 @@ if (RocksDB_FOUND)
${BINARY_DIR}/librocksdb.a)
else()
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v6.27.3.tar.gz
URL_HASH SHA256=ee29901749b9132692b26f0a6c1d693f47d1a9ed8e3771e60556afe80282bf58
CMAKE_ARGS -DUSE_RTTI=1 -DPORTABLE=${PORTABLE_ROCKSDB}
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DFAIL_ON_WARNINGS=OFF
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_BZ2=OFF
-DWITH_LZ4=ON
-DWITH_SNAPPY=OFF
-DWITH_ZLIB=OFF
-DWITH_ZSTD=OFF
-DWITH_LIBURING=${WITH_LIBURING}
-DWITH_TSAN=${USE_TSAN}
-DWITH_ASAN=${USE_ASAN}
-DWITH_UBSAN=${USE_UBSAN}
-DROCKSDB_BUILD_SHARED=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=True
URL https://github.com/facebook/rocksdb/archive/refs/tags/v6.27.3.tar.gz
URL_HASH SHA256=ee29901749b9132692b26f0a6c1d693f47d1a9ed8e3771e60556afe80282bf58
CMAKE_ARGS ${RocksDB_CMAKE_ARGS}
BUILD_BYPRODUCTS <BINARY_DIR>/librocksdb.a
INSTALL_COMMAND ""
)
@ -70,7 +61,7 @@ else()
${BINARY_DIR}/librocksdb.a)
ExternalProject_Get_Property(rocksdb SOURCE_DIR)
set (ROCKSDB_INCLUDE_DIR "${SOURCE_DIR}/include")
set(ROCKSDB_INCLUDE_DIR "${SOURCE_DIR}/include")
set(ROCKSDB_FOUND TRUE)
endif()

cmake/CompileZstd.cmake (new file)

@ -0,0 +1,23 @@
# Compile zstd
function(compile_zstd)
include(FetchContent)
set(ZSTD_SOURCE_DIR ${CMAKE_BINARY_DIR}/zstd)
FetchContent_Declare(
ZSTD
GIT_REPOSITORY https://github.com/facebook/zstd.git
GIT_TAG v1.5.2
SOURCE_DIR ${ZSTD_SOURCE_DIR}
BINARY_DIR ${ZSTD_SOURCE_DIR}
SOURCE_SUBDIR "build/cmake"
)
FetchContent_MakeAvailable(ZSTD)
add_library(ZSTD::ZSTD STATIC IMPORTED)
set_target_properties(ZSTD::ZSTD PROPERTIES IMPORTED_LOCATION "${CMAKE_BINARY_DIR}/lib/libzstd.a")
target_include_directories(ZSTD::ZSTD PUBLIC ${ZSTD_INCLUDE_DIRS})
endfunction(compile_zstd)

View File

@ -25,6 +25,7 @@ env_set(STATIC_LINK_LIBCXX "${_static_link_libcxx}" BOOL "Statically link libstd
env_set(TRACE_PC_GUARD_INSTRUMENTATION_LIB "" STRING "Path to a library containing an implementation for __sanitizer_cov_trace_pc_guard. See https://clang.llvm.org/docs/SanitizerCoverage.html for more info.")
env_set(PROFILE_INSTR_GENERATE OFF BOOL "If set, build FDB as an instrumentation build to generate profiles")
env_set(PROFILE_INSTR_USE "" STRING "If set, build FDB with profile")
env_set(FULL_DEBUG_SYMBOLS OFF BOOL "Generate full debug symbols")
set(USE_SANITIZER OFF)
if(USE_ASAN OR USE_VALGRIND OR USE_MSAN OR USE_TSAN OR USE_UBSAN)
@ -164,9 +165,20 @@ else()
set(SANITIZER_COMPILE_OPTIONS)
set(SANITIZER_LINK_OPTIONS)
# we always compile with debug symbols. CPack will strip them out
# we always compile with debug symbols. For release builds CPack will strip them out
# and create a debuginfo rpm
add_compile_options(-ggdb -fno-omit-frame-pointer)
add_compile_options(-fno-omit-frame-pointer -gz)
add_link_options(-gz)
if(FDB_RELEASE OR FULL_DEBUG_SYMBOLS OR CMAKE_BUILD_TYPE STREQUAL "Debug")
# Configure with FULL_DEBUG_SYMBOLS=ON to generate all symbols for debugging with gdb
# Full debug symbols are also generated in release builds, because they are packaged
# separately and installed optionally
add_compile_options(-ggdb)
else()
# Generating minimal debug symbols by default. They are sufficient for testing purposes
add_compile_options(-ggdb1)
endif()
if(TRACE_PC_GUARD_INSTRUMENTATION_LIB)
add_compile_options(-fsanitize-coverage=trace-pc-guard)
link_libraries(${TRACE_PC_GUARD_INSTRUMENTATION_LIB})
@ -280,19 +292,34 @@ else()
#add_compile_options(-fno-builtin-memcpy)
if (CLANG OR ICX)
add_compile_options()
if (APPLE OR USE_LIBCXX)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-stdlib=libc++>)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
if (NOT APPLE)
if (STATIC_LINK_LIBCXX)
add_link_options(-static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic")
else()
# Make sure that libc++ can be found by the platform's loader, so that things like cmake's "try_run" work.
find_library(LIBCXX_SO_PATH c++ /usr/local/lib)
if (LIBCXX_SO_PATH)
get_filename_component(LIBCXX_SO_DIR ${LIBCXX_SO_PATH} DIRECTORY)
if (APPLE)
set(ENV{DYLD_LIBRARY_PATH} "$ENV{DYLD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
elseif(WIN32)
set(ENV{PATH} "$ENV{PATH};${LIBCXX_SO_DIR}")
else()
set(ENV{LD_LIBRARY_PATH} "$ENV{LD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
endif()
endif()
endif()
add_link_options(-stdlib=libc++ -Wl,-build-id=sha1)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -Wl,-build-id=sha1")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -stdlib=libc++ -Wl,-build-id=sha1")
endif()
endif()
if (NOT APPLE AND NOT USE_LIBCXX)
message(STATUS "Linking libatomic")
add_link_options(-latomic)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -latomic")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -latomic")
endif()
if (OPEN_FOR_IDE)
add_compile_options(

View File

@ -232,7 +232,12 @@ set(COROUTINE_IMPL ${DEFAULT_COROUTINE_IMPL} CACHE STRING "Which coroutine imple
set(BUILD_AWS_BACKUP OFF CACHE BOOL "Build AWS S3 SDK backup client")
if (BUILD_AWS_BACKUP)
set(WITH_AWS_BACKUP ON)
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
set(WITH_AWS_BACKUP ON)
else()
message(WARNING "BUILD_AWS_BACKUP set but ignored: ${CMAKE_SYSTEM_PROCESSOR} is not supported yet")
set(WITH_AWS_BACKUP OFF)
endif()
else()
set(WITH_AWS_BACKUP OFF)
endif()

View File

@ -2,10 +2,8 @@ project(awssdk-download NONE)
# Compile the sdk with clang and libc++, since otherwise we get libc++ vs libstdc++ link errors when compiling fdb with clang
set(AWSSDK_COMPILER_FLAGS "")
set(AWSSDK_LINK_FLAGS "")
if(APPLE OR CLANG OR USE_LIBCXX)
set(AWSSDK_COMPILER_FLAGS -stdlib=libc++ -nostdlib++)
set(AWSSDK_LINK_FLAGS -stdlib=libc++ -lc++abi)
if(APPLE OR USE_LIBCXX)
set(AWSSDK_COMPILER_FLAGS "-stdlib=libc++ -nostdlib++")
endif()
include(ExternalProject)
@ -21,11 +19,11 @@ ExternalProject_Add(awssdk_project
-DSIMPLE_INSTALL=ON
-DCMAKE_INSTALL_PREFIX=install # need to specify an install prefix so it doesn't install in /usr/lib - FIXME: use absolute path
-DBYO_CRYPTO=ON # we have our own crypto libraries that conflict if we let aws sdk build and link its own
-DBUILD_CURL=ON
-DBUILD_ZLIB=ON
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_EXE_LINKER_FLAGS=${AWSSDK_COMPILER_FLAGS}
-DCMAKE_CXX_FLAGS=${AWSSDK_LINK_FLAGS}
-DCMAKE_CXX_FLAGS=${AWSSDK_COMPILER_FLAGS}
TEST_COMMAND ""
# the sdk build produces a ton of artifacts, with their own dependency tree, so there is a very specific dependency order they must be linked in
BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-cpp-sdk-core.a"
@ -41,6 +39,8 @@ ExternalProject_Add(awssdk_project
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-compression.a"
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-cal.a"
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-common.a"
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/external-install/curl/lib/libcurl.a"
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/external-install/zlib/lib/libz.a"
)
add_library(awssdk_core STATIC IMPORTED)
@ -96,7 +96,15 @@ add_library(awssdk_c_common STATIC IMPORTED)
add_dependencies(awssdk_c_common awssdk_project)
set_target_properties(awssdk_c_common PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-common.a")
add_library(curl STATIC IMPORTED)
add_dependencies(curl awssdk_project)
set_property(TARGET curl PROPERTY IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/external-install/curl/lib/libcurl.a")
add_library(zlib STATIC IMPORTED)
add_dependencies(zlib awssdk_project)
set_property(TARGET zlib PROPERTY IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/external-install/zlib/lib/libz.a")
# link them all together in one interface target
add_library(awssdk_target INTERFACE)
target_include_directories(awssdk_target SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/include)
target_link_libraries(awssdk_target INTERFACE awssdk_core awssdk_crt awssdk_c_s3 awssdk_c_auth awssdk_c_eventstream awssdk_c_http awssdk_c_mqtt awssdk_c_sdkutils awssdk_c_io awssdk_checksums awssdk_c_compression awssdk_c_cal awssdk_c_common curl)
target_link_libraries(awssdk_target INTERFACE awssdk_core awssdk_crt awssdk_c_s3 awssdk_c_auth awssdk_c_eventstream awssdk_c_http awssdk_c_mqtt awssdk_c_sdkutils awssdk_c_io awssdk_checksums awssdk_c_compression awssdk_c_cal awssdk_c_common curl zlib)

View File

@ -1 +1,2 @@
using @BOOST_TOOLSET@ : : @BOOST_CXX_COMPILER@ : @BOOST_ADDITIONAL_COMPILE_OPTIONS@ ;
using zstd : 1.5.2 : <include>/@CMAKE_BINARY_DIR@/zstd/lib <search>/@CMAKE_BINARY_DIR@/lib ;

View File

@ -335,7 +335,12 @@ class TestRun:
command: List[str] = []
env: Dict[str, str] = os.environ.copy()
valgrind_file: Path | None = None
if self.use_valgrind:
if self.use_valgrind and self.binary == config.binary:
# Only run the binary under test under valgrind. There's nothing we
# can do about valgrind errors in old binaries anyway, and it makes
# the test take longer. Also old binaries weren't built with
# USE_VALGRIND=ON, and we have seen false positives with valgrind in
# such binaries.
command.append('valgrind')
valgrind_file = self.temp_path / Path('valgrind-{}.xml'.format(self.random_seed))
dbg_path = os.getenv('FDB_VALGRIND_DBGPATH')

View File

@ -7,16 +7,16 @@ For details, see http://sourceforge.net/projects/libb64
#include "libb64/cdecode.h"
int base64_decode_value(char value_in) {
static const char decoding[] = { 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -2, -1,
-1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 };
static const char decoding_size = sizeof(decoding);
int base64_decode_value(int value_in) {
static const int decoding[] = { 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -2, -1,
-1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 };
static const int decoding_size = sizeof(decoding) / sizeof(decoding[0]);
value_in -= 43;
if (value_in < 0 || value_in > decoding_size)
if (value_in < 0 || value_in >= decoding_size)
return -1;
return decoding[(int)value_in];
return decoding[value_in];
}
void base64_init_decodestate(base64_decodestate* state_in) {
@ -27,7 +27,7 @@ void base64_init_decodestate(base64_decodestate* state_in) {
int base64_decode_block(const char* code_in, const int length_in, char* plaintext_out, base64_decodestate* state_in) {
const char* codechar = code_in;
char* plainchar = plaintext_out;
char fragment;
int fragment = 0;
*plainchar = state_in->plainchar;
@ -40,9 +40,9 @@ int base64_decode_block(const char* code_in, const int length_in, char* plaintex
state_in->plainchar = *plainchar;
return plainchar - plaintext_out;
}
fragment = (char)base64_decode_value(*codechar++);
fragment = base64_decode_value(*codechar++);
} while (fragment < 0);
*plainchar = (fragment & 0x03f) << 2;
*plainchar = (char)((fragment & 0x03f) << 2);
case step_b:
do {
if (codechar == code_in + length_in) {
@ -50,10 +50,10 @@ int base64_decode_block(const char* code_in, const int length_in, char* plaintex
state_in->plainchar = *plainchar;
return plainchar - plaintext_out;
}
fragment = (char)base64_decode_value(*codechar++);
fragment = base64_decode_value(*codechar++);
} while (fragment < 0);
*plainchar++ |= (fragment & 0x030) >> 4;
*plainchar = (fragment & 0x00f) << 4;
*plainchar++ |= (char)((fragment & 0x030) >> 4);
*plainchar = (char)((fragment & 0x00f) << 4);
case step_c:
do {
if (codechar == code_in + length_in) {
@ -61,10 +61,10 @@ int base64_decode_block(const char* code_in, const int length_in, char* plaintex
state_in->plainchar = *plainchar;
return plainchar - plaintext_out;
}
fragment = (char)base64_decode_value(*codechar++);
fragment = base64_decode_value(*codechar++);
} while (fragment < 0);
*plainchar++ |= (fragment & 0x03c) >> 2;
*plainchar = (fragment & 0x003) << 6;
*plainchar++ |= (char)((fragment & 0x03c) >> 2);
*plainchar = (char)((fragment & 0x003) << 6);
case step_d:
do {
if (codechar == code_in + length_in) {
@ -72,9 +72,9 @@ int base64_decode_block(const char* code_in, const int length_in, char* plaintex
state_in->plainchar = *plainchar;
return plainchar - plaintext_out;
}
fragment = (char)base64_decode_value(*codechar++);
fragment = base64_decode_value(*codechar++);
} while (fragment < 0);
*plainchar++ |= (fragment & 0x03f);
*plainchar++ |= (char)((fragment & 0x03f));
}
}
/* control should not reach here */
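A minimal usage sketch for the decoder above (hedged: the buffer size and sample input are illustrative, not from the source; the API is the one declared in libb64/cdecode.h):

#include <stdio.h>
#include "libb64/cdecode.h"

int main(void) {
    const char encoded[] = "Zm9v"; /* base64 encoding of "foo" */
    char decoded[16];
    base64_decodestate state;
    base64_init_decodestate(&state);
    /* base64_decode_block returns the number of plaintext bytes produced */
    int n = base64_decode_block(encoded, sizeof(encoded) - 1, decoded, &state);
    printf("%.*s\n", n, decoded); /* prints: foo */
    return 0;
}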

View File

@ -17,7 +17,7 @@ typedef struct {
void base64_init_decodestate(base64_decodestate* state_in);
int base64_decode_value(char value_in);
int base64_decode_value(int value_in);
int base64_decode_block(const char* code_in, const int length_in, char* plaintext_out, base64_decodestate* state_in);

View File

@ -54,7 +54,7 @@ NOTE: All blocks except for the final block will have one last value which will
The code related to how a range file is written is in the `struct RangeFileWriter` in `namespace fileBackup`.
The code that decodes a range block is in `ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeRangeFileBlock(Reference<IAsyncFile> file, int64_t offset, int len)`.
The code that decodes a range block is in `ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeRangeFileBlock(Reference<IAsyncFile> file, int64_t offset, int len, Database cx)`.
### Data format in a log file

View File

@ -3,44 +3,48 @@
When the `GLOBAL_TAG_THROTTLING` knob is enabled, the ratekeeper will use the [transaction tagging feature](https://apple.github.io/foundationdb/transaction-tagging.html) to throttle tags according to the global tag throttling algorithm. This page describes the implementation of this algorithm.
### Tag Quotas
The global tag throttler bases throttling decisions on "quotas" provided by clients through the tag quota API. Each tag quota has four different components:
The global tag throttler bases throttling decisions on "quotas" provided by clients through the tag quota API. Each tag quota has two components:
* Reserved read quota
* Reserved write quota
* Total read quota
* Total write quota
* Reserved quota
* Total quota
The global tag throttler can not throttle tags to a throughput below the reserved quotas, and it cannot allow throughput to exceed the total quotas.
The global tag throttler cannot throttle tags to a throughput below the reserved quota, and it cannot allow throughput to exceed the total quota.
### Cost
The units for these quotas are computed as follows. The "cost" of a read operation is computed as:
Internally, the units for these quotas are "page costs", computed as follows. The "page cost" of a read operation is computed as:
```
readCost = bytesRead / SERVER_KNOBS->READ_COST_BYTE_FACTOR + 1;
readCost = ceiling(bytesRead / CLIENT_KNOBS->READ_COST_BYTE_FACTOR);
```
The "cost" of a write operation is computed as:
The "page cost" of a write operation is computed as:
```
writeCost = bytesWritten / CLIENT_KNOBS->WRITE_COST_BYTE_FACTOR + 1;
writeCost = SERVER_KNOBS->GLOBAL_TAG_THROTTLING_RW_FUNGIBILITY_RATIO * ceiling(bytesWritten / CLIENT_KNOBS->WRITE_COST_BYTE_FACTOR);
```
Here `bytesWritten` includes cleared bytes. The size of range clears is estimated at commit time.
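As a hedged illustration of the formulas above (the knob values are looked up from `CLIENT_KNOBS`/`SERVER_KNOBS` in practice; ceiling division is the only real logic):
```
// Sketch of the page-cost math described above; parameter values are assumptions.
#include <cstdint>

int64_t readPageCost(int64_t bytesRead, int64_t readCostByteFactor) {
    return (bytesRead + readCostByteFactor - 1) / readCostByteFactor; // ceiling
}

int64_t writePageCost(int64_t bytesWritten, int64_t writeCostByteFactor, double fungibilityRatio) {
    int64_t pages = (bytesWritten + writeCostByteFactor - 1) / writeCostByteFactor; // ceiling
    return (int64_t)(fungibilityRatio * pages);
}
```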
### Tuple Layer
Tag quotas are stored inside of the system keyspace (with prefix `\xff/tagQuota/`). They are stored using the tuple layer, in a tuple of form: `(reservedReadQuota, totalReadQuota, reservedWriteQuota, totalWriteQuota)`. There is currently no custom code in the bindings for manipulating these system keys. However, in any language for which bindings are available, it is possible to use the tuple layer to manipulate tag quotas.
Tag quotas are stored inside of the system keyspace (with prefix `\xff/tagQuota/`). They are stored using the tuple layer, in a tuple of form: `(reservedQuota, totalQuota)`. There is currently no custom code in the bindings for manipulating these system keys. However, in any language for which bindings are available, it is possible to use the tuple layer to manipulate tag quotas.
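For illustration, a hedged C++ sketch of writing a quota tuple under this prefix (the `Transaction`/`Tuple` usage is an assumption about the internal client API, not code from the bindings):
```
// Hedged sketch: store (reservedQuota, totalQuota) for tag "sampleTag".
Transaction tr(db);
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
Key quotaKey = "sampleTag"_sr.withPrefix("\xff/tagQuota/"_sr);
Tuple quota = Tuple::makeTuple((int64_t)1000, (int64_t)5000); // reserved, total
tr.set(quotaKey, quota.pack());
```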
### fdbcli
The easiest way for an external client to interact with tag quotas is through `fdbcli`. To get the quota of a particular tag, run the following command:
The easiest way for an external client to interact with tag quotas is through `fdbcli`. To get the quota (in bytes/second) of a particular tag, run the following command:
```
fdbcli> get <tag> [reserved|total] [read|write]
fdbcli> quota get <tag> [reserved_throughput|total_throughput]
```
To set the quota through `fdbcli`, run:
```
fdbcli> set <tag> [reserved|total] [read|write] <value>
fdbcli> quota set <tag> [reserved_throughput|total_throughput] <bytes_per_second>
```
Note that the quotas are specified in terms of bytes/second, and internally converted to page costs:
```
page_cost_quota = ceiling(byte_quota / CLIENT_KNOBS->READ_COST_BYTE_FACTOR)
```
### Limit Calculation

View File

@ -32,10 +32,10 @@ public:
explicit SKRExampleImpl(KeyRangeRef kr): SpecialKeyRangeReadImpl(kr) {
// Our implementation is quite simple here, the key-value pairs are formatted as:
// \xff\xff/example/<country_name> : <capital_city_name>
CountryToCapitalCity[LiteralStringRef("USA")] = LiteralStringRef("Washington, D.C.");
CountryToCapitalCity[LiteralStringRef("UK")] = LiteralStringRef("London");
CountryToCapitalCity[LiteralStringRef("Japan")] = LiteralStringRef("Tokyo");
CountryToCapitalCity[LiteralStringRef("China")] = LiteralStringRef("Beijing");
CountryToCapitalCity["USA"_sr] = "Washington, D.C."_sr;
CountryToCapitalCity["UK"_sr] = "London"_sr;
CountryToCapitalCity["Japan"_sr] = "Tokyo"_sr;
CountryToCapitalCity["China"_sr] = "Beijing"_sr;
}
// Implement the getRange interface
Future<RangeResult> getRange(ReadYourWritesTransaction* ryw,
@ -58,7 +58,7 @@ private:
};
// Instantiate the function object
// In development, you should have a function object pointer in DatabaseContext(DatabaseContext.h) and initialize in DatabaseContext's constructor(NativeAPI.actor.cpp)
const KeyRangeRef exampleRange(LiteralStringRef("\xff\xff/example/"), LiteralStringRef("\xff\xff/example/\xff"));
const KeyRangeRef exampleRange("\xff\xff/example/"_sr, "\xff\xff/example/\xff"_sr);
SKRExampleImpl exampleImpl(exampleRange);
// Assuming the database handler is `cx`, register to special-key-space
// In development, you should register all function objects in the constructor of DatabaseContext(NativeAPI.actor.cpp)
@ -67,16 +67,16 @@ cx->specialKeySpace->registerKeyRange(exampleRange, &exampleImpl);
state ReadYourWritesTransaction tr(cx);
// get
Optional<Value> res1 = wait(tr.get("\xff\xff/example/Japan"));
ASSERT(res1.present() && res.getValue() == LiteralStringRef("Tokyo"));
ASSERT(res1.present() && res1.getValue() == "Tokyo"_sr);
// getRange
// Note: for getRange(key1, key2), both key1 and key2 should be prefixed with \xff\xff
// something like getRange("normal_key", "\xff\xff/...") is not supported yet
RangeResult res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff")));
RangeResult res2 = wait(tr.getRange("\xff\xff/example/U"_sr, "\xff\xff/example/U\xff"_sr));
// res2 should contain USA and UK
ASSERT(
res2.size() == 2 &&
res2[0].value == LiteralStringRef("London") &&
res2[1].value == LiteralStringRef("Washington, D.C.")
res2[0].value == "London"_sr &&
res2[1].value == "Washington, D.C."_sr
);
```

View File

@ -69,7 +69,7 @@ release = root.find(".//{http://schemas.microsoft.com/developer/msbuild/2003}Ver
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@ -185,7 +185,7 @@ html_show_copyright = True
htmlhelp_basename = 'FoundationDB'
# Disable permalinks
html_add_permalinks = ""
html_permalinks = False
# -- Options for LaTeX output --------------------------------------------------

View File

@ -42,7 +42,7 @@ from docutils.parsers.rst import directives, Directive
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.locale import _
from sphinx.domains import Domain, ObjType, Index
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
@ -83,18 +83,18 @@ class RubyObject(ObjectDescription):
}
doc_field_types = [
TypedField('parameter', label=l_('Parameters'),
TypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='obj', typenames=('paramtype', 'type')),
TypedField('variable', label=l_('Variables'), rolename='obj',
TypedField('variable', label=_('Variables'), rolename='obj',
names=('var', 'ivar', 'cvar'),
typerolename='obj', typenames=('vartype',)),
GroupedField('exceptions', label=l_('Raises'), rolename='exc',
GroupedField('exceptions', label=_('Raises'), rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('returntype', label=l_('Return type'), has_arg=False,
Field('returntype', label=_('Return type'), has_arg=False,
names=('rtype',)),
]
@ -493,8 +493,8 @@ class RubyModuleIndex(Index):
"""
name = 'modindex'
localname = l_('Ruby Module Index')
shortname = l_('modules')
localname = _('Ruby Module Index')
shortname = _('modules')
def generate(self, docnames=None):
content = {}
@ -561,17 +561,17 @@ class RubyDomain(Domain):
name = 'rb'
label = 'Ruby'
object_types = {
'function': ObjType(l_('function'), 'func', 'obj'),
'global': ObjType(l_('global variable'), 'global', 'obj'),
'method': ObjType(l_('method'), 'meth', 'obj'),
'class': ObjType(l_('class'), 'class', 'obj'),
'exception': ObjType(l_('exception'), 'exc', 'obj'),
'classmethod': ObjType(l_('class method'), 'meth', 'obj'),
'attr_reader': ObjType(l_('attribute'), 'attr', 'obj'),
'attr_writer': ObjType(l_('attribute'), 'attr', 'obj'),
'attr_accessor': ObjType(l_('attribute'), 'attr', 'obj'),
'const': ObjType(l_('const'), 'const', 'obj'),
'module': ObjType(l_('module'), 'mod', 'obj'),
'function': ObjType(_('function'), 'func', 'obj'),
'global': ObjType(_('global variable'), 'global', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'class': ObjType(_('class'), 'class', 'obj'),
'exception': ObjType(_('exception'), 'exc', 'obj'),
'classmethod': ObjType(_('class method'), 'meth', 'obj'),
'attr_reader': ObjType(_('attribute'), 'attr', 'obj'),
'attr_writer': ObjType(_('attribute'), 'attr', 'obj'),
'attr_accessor': ObjType(_('attribute'), 'attr', 'obj'),
'const': ObjType(_('const'), 'const', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
}
directives = {

View File

@ -1,6 +1,6 @@
--index-url https://pypi.python.org/simple
setuptools>=20.10.0,<=57.4.0
sphinx==1.5.6
sphinx-bootstrap-theme==0.4.8
docutils==0.16
Jinja2==3.0.3
setuptools==65.3.0
sphinx==5.1.1
sphinx-bootstrap-theme==0.8.1
docutils==0.19
Jinja2==3.1.2

View File

@ -222,7 +222,7 @@ The FoundationDB client library performs most tasks on a singleton thread (which
Future
======
Most functions in the FoundationDB API are asynchronous, meaning that they may return to the caller before actually delivering their result. These functions always return :type:`FDBFuture*`. An :type:`FDBFuture` object represents a result value or error to be delivered at some future time. You can wait for a Future to be "ready" -- to have a value or error delivered -- by setting a callback function, or by blocking a thread, or by polling. Once a Future is ready, you can extract either an error code or a value of the appropriate type (the documentation for the original function will tell you which :func:`fdb_future_get_*()` function you should call).
Most functions in the FoundationDB API are asynchronous, meaning that they may return to the caller before actually delivering their result. These functions always return ``FDBFuture*``. An :type:`FDBFuture` object represents a result value or error to be delivered at some future time. You can wait for a Future to be "ready" -- to have a value or error delivered -- by setting a callback function, or by blocking a thread, or by polling. Once a Future is ready, you can extract either an error code or a value of the appropriate type (the documentation for the original function will tell you which ``fdb_future_get_*()`` function you should call).
To use the API in a synchronous way, you would typically do something like this for each asynchronous call::
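
   /* A hedged sketch (not the document's original example): wait for the
      future, check for an error, then extract the value. */
   FDBFuture* f = fdb_transaction_get(tr, (uint8_t const*)"key", 3, 0);
   fdb_error_t err = fdb_future_block_until_ready(f);
   if (!err) err = fdb_future_get_error(f);
   if (!err) {
       fdb_bool_t present; uint8_t const* value; int value_length;
       err = fdb_future_get_value(f, &present, &value, &value_length);
   }
   fdb_future_destroy(f);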
@ -282,7 +282,7 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
.. type:: FDBCallback
A pointer to a function which takes :type:`FDBFuture*` and ``void*`` and returns ``void``.
A pointer to a function which takes ``FDBFuture*`` and ``void*`` and returns ``void``.
.. function:: void fdb_future_release_memory(FDBFuture* future)
@ -298,13 +298,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
.. function:: fdb_error_t fdb_future_get_int64(FDBFuture* future, int64_t* out)
Extracts a 64-bit integer from an :type:`FDBFuture*` into a caller-provided variable of type ``int64_t``. |future-warning|
Extracts a 64-bit integer from a pointer to :type:`FDBFuture` into a caller-provided variable of type ``int64_t``. |future-warning|
|future-get-return1| |future-get-return2|.
.. function:: fdb_error_t fdb_future_get_key_array( FDBFuture* f, FDBKey const** out_key_array, int* out_count)
Extracts an array of :type:`FDBKey` from an :type:`FDBFuture*` into a caller-provided variable of type ``FDBKey*``. The size of the array will also be extracted and passed back by a caller-provided variable of type ``int`` |future-warning|
Extracts an array of :type:`FDBKey` from an ``FDBFuture*`` into a caller-provided variable of type ``FDBKey*``. The size of the array will also be extracted and passed back in a caller-provided variable of type ``int``. |future-warning|
|future-get-return1| |future-get-return2|.
@ -547,13 +547,13 @@ Applications must provide error handling and an appropriate retry loop around th
.. function:: void fdb_transaction_set_read_version(FDBTransaction* transaction, int64_t version)
Sets the snapshot read version used by a transaction. This is not needed in simple cases. If the given version is too old, subsequent reads will fail with error_code_transaction_too_old; if it is too new, subsequent reads may be delayed indefinitely and/or fail with error_code_future_version. If any of :func:`fdb_transaction_get_*()` have been called on this transaction already, the result is undefined.
Sets the snapshot read version used by a transaction. This is not needed in simple cases. If the given version is too old, subsequent reads will fail with error_code_transaction_too_old; if it is too new, subsequent reads may be delayed indefinitely and/or fail with error_code_future_version. If any of ``fdb_transaction_get_*()`` have been called on this transaction already, the result is undefined.
.. function:: FDBFuture* fdb_transaction_get_read_version(FDBTransaction* transaction)
|future-return0| the transaction snapshot read version. |future-return1| call :func:`fdb_future_get_int64()` to extract the version into an int64_t that you provide, |future-return2|
The transaction obtains a snapshot read version automatically at the time of the first call to :func:`fdb_transaction_get_*()` (including this one) and (unless causal consistency has been deliberately compromised by transaction options) is guaranteed to represent all transactions which were reported committed before that call.
The transaction obtains a snapshot read version automatically at the time of the first call to ``fdb_transaction_get_*()`` (including this one) and (unless causal consistency has been deliberately compromised by transaction options) is guaranteed to represent all transactions which were reported committed before that call.
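A hedged sketch of reading the version synchronously (error handling abbreviated)::

   FDBFuture* vf = fdb_transaction_get_read_version(transaction);
   fdb_error_t err = fdb_future_block_until_ready(vf);
   int64_t version = -1;
   if (!err) err = fdb_future_get_int64(vf, &version);
   fdb_future_destroy(vf);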
.. function:: FDBFuture* fdb_transaction_get(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t snapshot)
@ -829,7 +829,7 @@ Applications must provide error handling and an appropriate retry loop around th
|future-returnvoid|
Callers will usually want to retry a transaction if the commit or a prior :func:`fdb_transaction_get_*()` returns a retryable error (see :func:`fdb_transaction_on_error()`).
Callers will usually want to retry a transaction if the commit or a prior ``fdb_transaction_get_*()`` returns a retryable error (see :func:`fdb_transaction_on_error()`).
|commit-unknown-result-blurb|
@ -878,9 +878,9 @@ Applications must provide error handling and an appropriate retry loop around th
.. function:: FDBFuture* fdb_transaction_on_error(FDBTransaction* transaction, fdb_error_t error)
Implements the recommended retry and backoff behavior for a transaction. This function knows which of the error codes generated by other :func:`fdb_transaction_*()` functions represent temporary error conditions and which represent application errors that should be handled by the application. It also implements an exponential backoff strategy to avoid swamping the database cluster with excessive retries when there is a high level of conflict between transactions.
Implements the recommended retry and backoff behavior for a transaction. This function knows which of the error codes generated by other ``fdb_transaction_*()`` functions represent temporary error conditions and which represent application errors that should be handled by the application. It also implements an exponential backoff strategy to avoid swamping the database cluster with excessive retries when there is a high level of conflict between transactions.
On receiving any type of error from an :func:`fdb_transaction_*()` function, the application should:
On receiving any type of error from an ``fdb_transaction_*()`` function, the application should:
1. Call :func:`fdb_transaction_on_error()` with the returned :type:`fdb_error_t` code.
@ -963,15 +963,15 @@ Key selectors
In the FoundationDB C API, key selectors are not represented by a structure of any kind, but are instead expressed as sequential parameters to |get-key-func| and |get-range-func|. For convenience, the most common key selectors are available as C macros that expand to the appropriate parameters.
.. function:: FDB_KEYSEL_LAST_LESS_THAN(key_name, key_name_length)
.. type:: FDB_KEYSEL_LAST_LESS_THAN(key_name, key_name_length)
.. function:: FDB_KEYSEL_LAST_LESS_OR_EQUAL(key_name, key_name_length)
.. type:: FDB_KEYSEL_LAST_LESS_OR_EQUAL(key_name, key_name_length)
.. function:: FDB_KEYSEL_FIRST_GREATER_THAN(key_name, key_name_length)
.. type:: FDB_KEYSEL_FIRST_GREATER_THAN(key_name, key_name_length)
.. function:: FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(key_name, key_name_length)
.. type:: FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(key_name, key_name_length)
To use one of these macros, simply replace the four parameters in the function with one of :func:`FDB_KEYSEL_*`::
To use one of these macros, simply replace the four parameters in the function with one of ``FDB_KEYSEL_*``::
future = fdb_transaction_get_key(transaction, "key", 3, 0, 2, 0);
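For example, the common first-greater-than selector can be written as follows (a hedged sketch; the macro supplies the key, its length, and the or-equal/offset pair)::

   future = fdb_transaction_get_key(transaction, FDB_KEYSEL_FIRST_GREATER_THAN("key", 3), 0);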

View File

@ -194,10 +194,6 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
|option-tls-key-bytes|
.. method :: fdb.options.set_tls_verify_peers(verification_pattern)
|option-tls-verify-peers|
.. method :: fdb.options.set_tls_ca_bytes(ca_bundle)
|option-tls-ca-bytes|
@ -210,10 +206,6 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
|option-tls-password|
.. method :: fdb.options.set_disable_multi_version_client_api()
|option-disable-multi-version-client-api|
.. method :: fdb.options.set_disable_local_client()
|option-set-disable-local-client|
@ -761,10 +753,6 @@ In each of the methods below, ``param`` should be a string appropriately packed
Committing
----------
.. decorator:: transactional()
The ``transactional`` decorator makes it easy to write transactional functions which accept a :class:`Database`, :class:`Tenant`, or :class:`Transaction` as a parameter and automatically commit. See :func:`@fdb.transactional <transactional>` for explanation and examples.
.. method :: Transaction.commit()
Attempt to commit the changes made in the transaction to the database. Returns a :class:`FutureVoid` representing the asynchronous result of the commit. You **must** call the :meth:`Future.wait()` method on the returned :class:`FutureVoid`, which will raise an exception if the commit failed.

View File

@ -153,13 +153,6 @@ If ``description=<DESC>`` is specified, the description field in the cluster fil
For more information on setting the cluster description, see :ref:`configuration-setting-cluster-description`.
createtenant
------------
The ``createtenant`` command is used to create new tenants in the cluster. Its syntax is ``createtenant <TENANT_NAME>``.
The tenant name can be any byte string that does not begin with the ``\xff`` byte. If the tenant already exists, ``fdbcli`` will report an error.
defaulttenant
-------------
@ -167,13 +160,6 @@ The ``defaulttenant`` command configures ``fdbcli`` to run its commands without
The active tenant cannot be changed while a transaction (using ``begin``) is open.
deletetenant
------------
The ``deletetenant`` command is used to delete tenants from the cluster. Its syntax is ``deletetenant <TENANT_NAME>``.
In order to delete a tenant, it must be empty. To delete a tenant with data, first clear that data using the ``clear`` command. If the tenant does not exist, ``fdbcli`` will report an error.
exclude
-------
@ -231,33 +217,8 @@ The ``getrangekeys`` command fetches keys in a range. Its syntax is ``getrangeke
Note that :ref:`characters can be escaped <cli-escaping>` when specifying keys (or values) in ``fdbcli``.
gettenant
---------
The ``gettenant`` command fetches metadata for a given tenant and displays it. Its syntax is ``gettenant <TENANT_NAME> [JSON]``.
Included in the output of this command are the ``id`` and ``prefix`` assigned to the tenant. If the tenant does not exist, ``fdbcli`` will report an error. If ``JSON`` is specified, then the output will be written as a JSON document::
{
"tenant": {
"id": 0,
"prefix": {
"base64": "AAAAAAAAAAU=",
"printable": "\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05",
}
},
"type": "success"
}
In the event of an error, the output will include an error message::
{
"error": "...",
"type": "error"
}
getversion
----------
getversion
----------
The ``getversion`` command fetches the current read version of the cluster or currently running transaction.
@ -346,13 +307,6 @@ Attempts to kill all specified processes. Each address should include the IP and
Attempts to kill all known processes in the cluster.
listtenants
-----------
The ``listtenants`` command prints the names of tenants in the cluster. Its syntax is ``listtenants [BEGIN] [END] [LIMIT]``.
By default, the ``listtenants`` command will print up to 100 entries from the entire range of tenants. A narrower sub-range can be printed using the optional ``[BEGIN]`` and ``[END]`` parameters, and the limit can be changed by specifying an integer ``[LIMIT]`` parameter.
lock
----
@ -417,13 +371,6 @@ heap
Enables heap profiling for the specified process.
renametenant
------------
The ``renametenant`` command can rename an existing tenant to a new name. Its syntax is ``renametenant <OLD_NAME> <NEW_NAME>``.
This command requires that ``OLD_NAME`` is a tenant that already exists on the cluster, and that ``NEW_NAME`` is not already a name of a tenant in the cluster.
reset
-----
@ -484,6 +431,141 @@ status json
.. _cli-throttle:
tenant
------
The ``tenant`` command is used to view and manage the tenants in a cluster. The ``tenant`` command has the following subcommands:
create
^^^^^^
``tenant create <NAME> [tenant_group=<TENANT_GROUP>]``
Creates a new tenant in the cluster.
``NAME`` - The desired name of the tenant. The name can be any byte string that does not begin with the ``\xff`` byte.
``TENANT_GROUP`` - The tenant group the tenant will be placed in.
delete
^^^^^^
``tenant delete <NAME>``
Deletes a tenant from the cluster. The tenant must be empty.
``NAME`` - the name of the tenant to delete.
list
^^^^
``tenant list [BEGIN] [END] [LIMIT]``
Lists the tenants present in the cluster.
``BEGIN`` - the first tenant to list. Defaults to the empty tenant name ``""``.
``END`` - the exclusive end tenant to list. Defaults to ``\xff\xff``.
``LIMIT`` - the number of tenants to list. Defaults to 100.
get
^^^
``tenant get <NAME> [JSON]``
Prints the metadata for a tenant.
``NAME`` - the name of the tenant to print.
``JSON`` - if specified, the output of the command will be printed in the form of a JSON string::
{
"tenant": {
"id": 0,
"prefix": {
"base64": "AAAAAAAAAAU=",
"printable": "\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05",
}
},
"type": "success"
}
In the event of an error, the JSON output will include an error message::
{
"error": "...",
"type": "error"
}
configure
^^^^^^^^^
``tenant configure <TENANT_NAME> <[unset] tenant_group[=GROUP_NAME]>``
Changes the configuration of a tenant.
``TENANT_NAME`` - the name of the tenant to reconfigure.
The following tenant fields can be configured:
``tenant_group`` - changes the tenant group a tenant is assigned to. If ``unset`` is specified, the tenant will be configured to not be in a group. Otherwise, ``GROUP_NAME`` must be set to the new group that the tenant should become a member of.
rename
^^^^^^
``tenant rename <OLD_NAME> <NEW_NAME>``
Changes the name of an existing tenant.
``OLD_NAME`` - the name of the tenant being renamed.
``NEW_NAME`` - the desired name of the tenant. This name must not already be in use.
tenantgroup
-----------
The ``tenantgroup`` command is used to view details about the tenant groups in a cluster. The ``tenantgroup`` command has the following subcommands:
list
^^^^
``tenantgroup list [BEGIN] [END] [LIMIT]``
Lists the tenant groups present in the cluster.
``BEGIN`` - the first tenant group to list. Defaults to the empty tenant group name ``""``.
``END`` - the exclusive end tenant group to list. Defaults to ``\xff\xff``.
``LIMIT`` - the number of tenant groups to list. Defaults to 100.
get
^^^
``tenantgroup get <NAME> [JSON]``
Prints the metadata for a tenant group.
``NAME`` - the name of the tenant group to print.
``JSON`` - if specified, the output of the command will be printed in the form of a JSON string::
{
"tenant_group": {
"assigned_cluster": "cluster1",
},
"type": "success"
}
In the event of an error, the JSON output will include an error message::
{
"error": "...",
"type": "error"
}
throttle
--------

View File

@ -1,7 +1,6 @@
.. default-domain:: py
.. default-domain:: py
.. highlight:: python
.. module:: fdb
.. Required substitutions for api-common.rst.inc

View File

@ -1,7 +1,6 @@
.. default-domain:: py
.. default-domain:: py
.. highlight:: python
.. module:: fdb
.. Required substitutions for api-common.rst.inc

View File

@ -82,7 +82,7 @@ Values must always be encoded according to the :ref:`api-python-tuple-layer`.
// In GlobalConfig.actor.h
extern const KeyRef myGlobalConfigKey;
// In GlobalConfig.actor.cpp
const KeyRef myGlobalConfigKey = LiteralStringRef("config/key");
const KeyRef myGlobalConfigKey = "config/key"_sr;
// When you want to set the value..
Tuple value = Tuple::makeTuple((double)1.5);

View File

@ -50,6 +50,7 @@ The latest changes are detailed in :ref:`release-notes`. The documentation has t
:hidden:
local-dev
internal-dev-tools
why-foundationdb
technical-overview
client-design

View File

@ -0,0 +1,58 @@
##################
Internal Dev Tools
##################
Code Probes
===========
Code probes are a mechanism in FDB to prove that certain code-paths are being tested under the right conditions. They differ from code coverage in multiple ways (explained below).
The general format of a code probe is:
.. code-block:: C++
CODE_PROBE(<condition>, "Comment", [annotations...]);
A simple example of a code probe could look as follows:
.. code-block:: C++
CODE_PROBE(self->forceRecovery, "Resolver detects forced recovery", probe::context::sim2);
At a very high level, the above code indicates that whenever this line is executed and ``self->forceRecovery`` is ``true``, we ran into some interesting case. In addition, this probe is annotated with ``probe::context::sim2``. This indicates that we expect this code to eventually be hit in simulation.
By default, FDB will simply write a trace line when this code is hit and the condition is ``true``. If the code is never hit, the simulator will, at the end of the run, print the code probe but set the ``covered`` field to ``false``. This all happens in the context of a single simulation run (``fdbserver`` doesn't have a concept of ensembles). This information is written into the log file. ``TestHarness`` (see below) will then use this information to write code probe statistics for the ensemble in the Joshua cluster (if the test is run in Joshua).
We expect that ALL code probes will be hit in a nightly run. In the future we can potentially use this feature for other things (like instructing the simulator to do an extensive search starting from the point where one of these probes is hit).
In addition to ``context`` annotations, users can also define and pass assertions. For example:
.. code-block:: C++
CODE_PROBE(condition, "Some comment", assert::simOnly);
These will add an assertion to the code. In addition to that, the simulator will not print missed code probes that asserted that the probe won't be hit in simulation.
Test Harness
============
TestHarness is our primary testing tool. It has multiple jobs:
* *Running*: It can run a test in Joshua.
* *Statistics*: It will choose a test to run based on the CPU time each test consumed in previous runs (within the same ensemble). It does that by writing statistics about the test at the end of each run.
* *Reporting*: After an ensemble has finished (or while it is running), ``TestHarness`` can be used to generate a report in ``xml`` or ``json``.
Test Harness can be found in the FDB source repository under ``contrib/TestHarness2``. It has a weak dependency on `joshua <https://github.com/foundationDB/fdb-joshua>`_ (if Test Harness can find joshua, it will report back about failed tests; otherwise it will just print out general statistics about the ensemble). Joshua will call Test Harness as follows:
.. code-block:: shell
python3 -m test_harness.app -s ${JOSHUA_SEED} --old-binaries-path ${OLDBINDIR}
Here the seed is a random number generated by joshua and ``OLDBINDIR`` is a directory path where the old fdb binaries can be found (this is needed for restart tests). If one wants to retry a test, they can pass the previous joshua seed, a directory path that has *exactly* the same content as ``OLDBINDIR``, plus the reported statistics to the test harness app. This should then re-run the same code as before.
In order to figure out what command line arguments ``test_harness.app`` (and ``test_harness.results``) accepts, one can check the contents of ``contrib/TestHarness2/test_harness/config.py``.
Reporting
---------
After a joshua ensemble has completed, ``test_harness.results`` can be used to get a report on the ensemble. This will include, by default, a list of all failed tests (similar to ``joshua tail --errors``, though in a more human-readable form). For a completed ensemble it will also print code probes that weren't hit often enough. An ensemble is considered successful if no simulation runs completed with an error AND all code probes have been hit sufficiently often.

View File

@ -2,6 +2,21 @@
Release Notes
#############
7.1.23
======
* Same as 7.1.22 release with AVX enabled.
7.1.22
======
* Released with AVX disabled.
* Added new latency samples for GetValue, GetRange, QueueWait, and VersionWait in storage servers. `(PR #8215) <https://github.com/apple/foundationdb/pull/8215>`_
* Fixed a rare partial data write for TLogs. `(PR #8210) <https://github.com/apple/foundationdb/pull/8210>`_
* Added HTTP proxy support for backup agents. `(PR #8193) <https://github.com/apple/foundationdb/pull/8193>`_
* Fixed a memory bug of secondary queries in index prefetch. `(PR #8195) <https://github.com/apple/foundationdb/pull/8195>`_, `(PR #8190) <https://github.com/apple/foundationdb/pull/8190>`_
* Introduced STORAGE_SERVER_REBOOT_ON_IO_TIMEOUT knob to recreate SS at io_timeout errors. `(PR #8123) <https://github.com/apple/foundationdb/pull/8123>`_
* Fixed two TLog stopped bugs and a CC leader replacement bug. `(PR #8081) <https://github.com/apple/foundationdb/pull/8081>`_
* Added back RecoveryAvailable trace event for status's seconds_since_last_recovered field. `(PR #8068) <https://github.com/apple/foundationdb/pull/8068>`_
7.1.21
======
* Same as 7.1.20 release with AVX enabled.

View File

@ -126,11 +126,11 @@ Default Values
Certificate file default location
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The default behavior when the certificate or key file is not specified is to look for a file named ``fdb.pem`` in the current working directory. If this file is not present, an attempt is made to load a file from a system-dependent location as follows:
The default behavior when the certificate or key file is not specified is to look for files named ``cert.pem`` or ``key.pem``, respectively, in system-dependent locations as follows:
* Linux: ``/etc/foundationdb/fdb.pem``
* macOS: ``/usr/local/etc/foundationdb/fdb.pem``
* Windows: ``C:\ProgramData\foundationdb\fdb.pem``
* Linux: ``/etc/foundationdb/cert.pem`` and ``/etc/foundationdb/key.pem``
* macOS: ``/usr/local/etc/foundationdb/cert.pem`` and ``/usr/local/etc/foundationdb/key.pem``
* Windows: ``C:\ProgramData\foundationdb\cert.pem`` and ``C:\ProgramData\foundationdb\key.pem``
Default Peer Verification
^^^^^^^^^^^^^^^^^^^^^^^^^
@ -214,9 +214,12 @@ Certificate creation
If your organization already makes use of certificates for access control and securing communications, you should ask your security expert for organizational procedure for obtaining and verifying certificates. If the goal of enabling TLS is to make sure that only known machines can join or access the FoundationDB cluster and for securing communications, then creating your own certificates can serve these purposes.
The following set of commands uses the OpenSSL command-line tools to create a self-signed certificate and private key. The certificate is then joined with the private key in the output ``fdb.pem`` file::
The following set of commands uses the OpenSSL command-line tools to create a self-signed certificate and private key::
user@host:> openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout key.pem -out cert.pem
Optionally, the certificate can be joined with the private key and supplied as both the certificate and key files::
user@host:> openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout private.key -out cert.crt
user@host:> cat cert.crt private.key > fdb.pem
Peer verification

View File

@ -478,7 +478,7 @@ ACTOR Future<Void> fdbClient() {
state Transaction tx(db);
state std::string keyPrefix = "/tut/";
state Key startKey;
state KeyRef endKey = LiteralStringRef("/tut0");
state KeyRef endKey = "/tut0"_sr;
state int beginIdx = 0;
loop {
try {
@ -494,7 +494,7 @@ ACTOR Future<Void> fdbClient() {
RangeResult range = wait(tx.getRange(KeyRangeRef(startKey, endKey), 100));
for (int i = 0; i < 10; ++i) {
Key k = Key(keyPrefix + std::to_string(beginIdx + deterministicRandom()->randomInt(0, 100)));
tx.set(k, LiteralStringRef("foo"));
tx.set(k, "foo"_sr);
}
wait(tx.commit());
std::cout << "Committed\n";

View File

@ -905,12 +905,12 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
SO_END_OF_OPTIONS
};
const KeyRef exeAgent = LiteralStringRef("backup_agent");
const KeyRef exeBackup = LiteralStringRef("fdbbackup");
const KeyRef exeRestore = LiteralStringRef("fdbrestore");
const KeyRef exeFastRestoreTool = LiteralStringRef("fastrestore_tool"); // must be lower case
const KeyRef exeDatabaseAgent = LiteralStringRef("dr_agent");
const KeyRef exeDatabaseBackup = LiteralStringRef("fdbdr");
const KeyRef exeAgent = "backup_agent"_sr;
const KeyRef exeBackup = "fdbbackup"_sr;
const KeyRef exeRestore = "fdbrestore"_sr;
const KeyRef exeFastRestoreTool = "fastrestore_tool"_sr; // must be lower case
const KeyRef exeDatabaseAgent = "dr_agent"_sr;
const KeyRef exeDatabaseBackup = "fdbdr"_sr;
extern const char* getSourceVersion();
@ -1351,7 +1351,7 @@ ProgramExe getProgramType(std::string programExe) {
}
#endif
// For debugging convenience, remove .debug suffix if present.
if (StringRef(programExe).endsWith(LiteralStringRef(".debug")))
if (StringRef(programExe).endsWith(".debug"_sr))
programExe = programExe.substr(0, programExe.size() - 6);
// Check if backup agent
@ -1856,11 +1856,7 @@ ACTOR Future<Void> submitDBBackup(Database src,
std::string tagName) {
try {
state DatabaseBackupAgent backupAgent(src);
// Backup everything, if no ranges were specified
if (backupRanges.size() == 0) {
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
}
ASSERT(!backupRanges.empty());
wait(backupAgent.submitBackup(
dest, KeyRef(tagName), backupRanges, StopWhenDone::False, StringRef(), StringRef(), LockDB::True));
@ -1906,6 +1902,7 @@ ACTOR Future<Void> submitBackup(Database db,
int initialSnapshotIntervalSeconds,
int snapshotIntervalSeconds,
Standalone<VectorRef<KeyRangeRef>> backupRanges,
bool encryptionEnabled,
std::string tagName,
bool dryRun,
WaitForComplete waitForCompletion,
@ -1914,11 +1911,7 @@ ACTOR Future<Void> submitBackup(Database db,
IncrementalBackupOnly incrementalBackupOnly) {
try {
state FileBackupAgent backupAgent;
// Backup everything, if no ranges were specified
if (backupRanges.size() == 0) {
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
}
ASSERT(!backupRanges.empty());
if (dryRun) {
state KeyBackedTag tag = makeBackupTag(tagName);
@ -1965,6 +1958,7 @@ ACTOR Future<Void> submitBackup(Database db,
snapshotIntervalSeconds,
tagName,
backupRanges,
encryptionEnabled,
stopWhenDone,
usePartitionedLog,
incrementalBackupOnly));
@ -2018,11 +2012,7 @@ ACTOR Future<Void> switchDBBackup(Database src,
ForceAction forceAction) {
try {
state DatabaseBackupAgent backupAgent(src);
// Backup everything, if no ranges were specified
if (backupRanges.size() == 0) {
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
}
ASSERT(!backupRanges.empty());
wait(backupAgent.atomicSwitchover(dest, KeyRef(tagName), backupRanges, StringRef(), StringRef(), forceAction));
printf("The DR on tag `%s' was successfully switched.\n", printable(StringRef(tagName)).c_str());
@ -2289,9 +2279,7 @@ ACTOR Future<Void> runRestore(Database db,
OnlyApplyMutationLogs onlyApplyMutationLogs,
InconsistentSnapshotOnly inconsistentSnapshotOnly,
Optional<std::string> encryptionKeyFile) {
if (ranges.empty()) {
ranges.push_back_deep(ranges.arena(), normalKeys);
}
ASSERT(!ranges.empty());
if (targetVersion != invalidVersion && !targetTimestamp.empty()) {
fprintf(stderr, "Restore target version and target timestamp cannot both be specified\n");
@ -2372,7 +2360,7 @@ ACTOR Future<Void> runRestore(Database db,
fmt::print("Restored to version {}\n", restoredVersion);
}
} else {
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(targetVersion, ranges));
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(targetVersion, db, ranges));
if (!rset.present()) {
fmt::print(stderr,
@ -2449,8 +2437,8 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
dbVersion,
LockDB::True,
randomUID,
LiteralStringRef(""),
LiteralStringRef("")));
""_sr,
""_sr));
// TODO: Support addPrefix and removePrefix
if (waitForDone) {
// Wait for parallel restore to finish and unlock DB after that
@ -2482,7 +2470,7 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
restoreVersion = dbVersion;
}
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(restoreVersion));
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(restoreVersion, db));
if (!rset.present()) {
fmt::print(stderr, "Insufficient data to restore to version {}\n", restoreVersion);
throw restore_invalid_version();
@ -2687,7 +2675,8 @@ ACTOR Future<Void> queryBackup(const char* name,
Version restoreVersion,
std::string originalClusterFile,
std::string restoreTimestamp,
Verbose verbose) {
Verbose verbose,
Optional<Database> cx) {
state UID operationId = deterministicRandom()->randomUniqueID();
state JsonBuilderObject result;
state std::string errorMessage;
@ -2752,7 +2741,7 @@ ACTOR Future<Void> queryBackup(const char* name,
format("the specified restorable version %lld is not valid", restoreVersion));
return Void();
}
Optional<RestorableFileSet> fileSet = wait(bc->getRestoreSet(restoreVersion, keyRangesFilter));
Optional<RestorableFileSet> fileSet = wait(bc->getRestoreSet(restoreVersion, cx, keyRangesFilter));
if (fileSet.present()) {
int64_t totalRangeFilesSize = 0, totalLogFilesSize = 0;
result["restore_version"] = fileSet.get().targetVersion;
@ -3089,7 +3078,7 @@ static void addKeyRange(std::string optionValue, Standalone<VectorRef<KeyRangeRe
Version parseVersion(const char* str) {
StringRef s((const uint8_t*)str, strlen(str));
if (s.endsWith(LiteralStringRef("days")) || s.endsWith(LiteralStringRef("d"))) {
if (s.endsWith("days"_sr) || s.endsWith("d"_sr)) {
float days;
if (sscanf(str, "%f", &days) != 1) {
fprintf(stderr, "Could not parse version: %s\n", str);
@ -3378,6 +3367,8 @@ int main(int argc, char* argv[]) {
bool trace = false;
bool quietDisplay = false;
bool dryRun = false;
// TODO (Nim): Set this value when we add optional encrypt_files CLI argument to backup agent start
bool encryptionEnabled = true;
std::string traceDir = "";
std::string traceFormat = "";
std::string traceLogGroup;
@ -3608,7 +3599,7 @@ int main(int argc, char* argv[]) {
case OPT_DESTCONTAINER:
destinationContainer = args->OptionArg();
// If the url starts with '/' then prepend "file://" for backwards compatibility
if (StringRef(destinationContainer).startsWith(LiteralStringRef("/")))
if (StringRef(destinationContainer).startsWith("/"_sr))
destinationContainer = std::string("file://") + destinationContainer;
modifyOptions.destURL = destinationContainer;
break;
@ -3654,7 +3645,7 @@ int main(int argc, char* argv[]) {
case OPT_RESTORECONTAINER:
restoreContainer = args->OptionArg();
// If the url starts with '/' then prepend "file://" for backwards compatibility
if (StringRef(restoreContainer).startsWith(LiteralStringRef("/")))
if (StringRef(restoreContainer).startsWith("/"_sr))
restoreContainer = std::string("file://") + restoreContainer;
break;
case OPT_DESCRIBE_DEEP:
@ -3945,6 +3936,12 @@ int main(int argc, char* argv[]) {
return result.present();
};
// The fastrestore tool does not yet support multiple ranges and is incompatible with tenants
// or other features that back up data in the system keys
if (backupKeys.empty() && programExe != ProgramExe::FASTRESTORE_TOOL) {
addDefaultBackupRanges(backupKeys);
}
switch (programExe) {
case ProgramExe::AGENT:
if (!initCluster())
@ -3964,6 +3961,7 @@ int main(int argc, char* argv[]) {
initialSnapshotIntervalSeconds,
snapshotIntervalSeconds,
backupKeys,
encryptionEnabled,
tagName,
dryRun,
waitForDone,
@ -4084,7 +4082,8 @@ int main(int argc, char* argv[]) {
restoreVersion,
restoreClusterFileOrig,
restoreTimestamp,
Verbose{ !quietDisplay }));
Verbose{ !quietDisplay },
db));
break;
case BackupType::DUMP:
@ -4323,19 +4322,19 @@ int main(int argc, char* argv[]) {
char* demangled = abi::__cxa_demangle(i->first, NULL, NULL, NULL);
if (demangled) {
s = demangled;
if (StringRef(s).startsWith(LiteralStringRef("(anonymous namespace)::")))
s = s.substr(LiteralStringRef("(anonymous namespace)::").size());
if (StringRef(s).startsWith("(anonymous namespace)::"_sr))
s = s.substr("(anonymous namespace)::"_sr.size());
free(demangled);
} else
s = i->first;
#else
s = i->first;
if (StringRef(s).startsWith(LiteralStringRef("class `anonymous namespace'::")))
s = s.substr(LiteralStringRef("class `anonymous namespace'::").size());
else if (StringRef(s).startsWith(LiteralStringRef("class ")))
s = s.substr(LiteralStringRef("class ").size());
else if (StringRef(s).startsWith(LiteralStringRef("struct ")))
s = s.substr(LiteralStringRef("struct ").size());
if (StringRef(s).startsWith("class `anonymous namespace'::"_sr))
s = s.substr("class `anonymous namespace'::"_sr.size());
else if (StringRef(s).startsWith("class "_sr))
s = s.substr("class "_sr.size());
else if (StringRef(s).startsWith("struct "_sr))
s = s.substr("struct "_sr.size());
#endif
typeNames.emplace_back(s, i->first);

View File

@ -31,7 +31,7 @@
namespace fdb_cli {
const KeyRef advanceVersionSpecialKey = LiteralStringRef("\xff\xff/management/min_required_commit_version");
const KeyRef advanceVersionSpecialKey = "\xff\xff/management/min_required_commit_version"_sr;
ACTOR Future<bool> advanceVersionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 2) {

View File

@ -0,0 +1,188 @@
/*
* BlobKeyCommand.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/NativeAPI.actor.h"
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace {
ACTOR Future<bool> printBlobHistory(Database db, Key key, Optional<Version> version) {
fmt::print("Printing blob history for {0}", key.printable());
if (version.present()) {
fmt::print(" @ {0}", version.get());
}
fmt::print("\n");
state Transaction tr(db);
state KeyRange activeGranule;
state KeyRange queryRange(KeyRangeRef(key, keyAfter(key)));
loop {
try {
Standalone<VectorRef<KeyRangeRef>> granules = wait(tr.getBlobGranuleRanges(queryRange, 2));
if (granules.empty()) {
fmt::print("No active granule for {0}\n", key.printable());
return false;
}
ASSERT(granules.size() == 1);
activeGranule = granules[0];
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
fmt::print("Active granule: [{0} - {1})\n", activeGranule.begin.printable(), activeGranule.end.printable());
// get latest history entry for range
state GranuleHistory history;
loop {
try {
RangeResult result =
wait(tr.getRange(blobGranuleHistoryKeyRangeFor(activeGranule), 1, Snapshot::False, Reverse::True));
ASSERT(result.size() <= 1);
if (result.empty()) {
fmt::print("No history entry found\n");
return true;
}
std::pair<KeyRange, Version> decodedKey = decodeBlobGranuleHistoryKey(result[0].key);
ASSERT(activeGranule == decodedKey.first);
history = GranuleHistory(activeGranule, decodedKey.second, decodeBlobGranuleHistoryValue(result[0].value));
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
fmt::print("History:\n\n");
loop {
// print history
std::string boundaryChangeAction;
if (history.value.parentVersions.empty()) {
boundaryChangeAction = "root";
} else if (history.value.parentVersions.size() == 1) {
boundaryChangeAction = "split";
} else {
boundaryChangeAction = "merge";
}
fmt::print("{0}) {1}\n\t{2}\n\t{3}\n({4})\n\n",
history.version,
history.value.granuleID.toString(),
history.range.begin.printable(),
history.range.end.printable(),
boundaryChangeAction);
// traverse back
if (history.value.parentVersions.empty() || (version.present() && history.version <= version.get())) {
break;
}
int i;
for (i = 0; i + 1 < history.value.parentBoundaries.size(); i++) {
if (history.value.parentBoundaries[i + 1] > key) {
break;
}
}
// key should fall between boundaries: parentBoundaries[i] <= key < parentBoundaries[i + 1]
ASSERT(i + 1 < history.value.parentBoundaries.size());
KeyRangeRef parentRange(history.value.parentBoundaries[i], history.value.parentBoundaries[i + 1]);
Version parentVersion = history.value.parentVersions[i];
state Key parentHistoryKey = blobGranuleHistoryKeyFor(parentRange, parentVersion);
state bool foundParent;
loop {
try {
Optional<Value> parentHistoryValue = wait(tr.get(parentHistoryKey));
foundParent = parentHistoryValue.present();
if (foundParent) {
std::pair<KeyRange, Version> decodedKey = decodeBlobGranuleHistoryKey(parentHistoryKey);
history = GranuleHistory(
decodedKey.first, decodedKey.second, decodeBlobGranuleHistoryValue(parentHistoryValue.get()));
}
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
if (!foundParent) {
break;
}
}
fmt::print("Done\n");
return true;
}
} // namespace
namespace fdb_cli {
ACTOR Future<bool> blobKeyCommandActor(Database localDb,
Optional<TenantMapEntry> tenantEntry,
std::vector<StringRef> tokens) {
// enables blob writing for the given range
if (tokens.size() != 3 && tokens.size() != 4) {
printUsage(tokens[0]);
return false;
}
ASSERT(tokens[1] == "history"_sr);
Key key;
Optional<Version> version;
if (tenantEntry.present()) {
key = tokens[2].withPrefix(tenantEntry.get().prefix);
} else {
key = tokens[2];
}
if (tokens.size() > 3) {
Version v;
int n = 0;
if (sscanf(tokens[3].toString().c_str(), "%" PRId64 "%n", &v, &n) != 1 || n != tokens[3].size()) {
printUsage(tokens[0]);
return false;
}
version = v;
}
if (key >= LiteralStringRef("\xff")) {
fmt::print("No blob history for system keyspace\n");
return false;
} else {
bool result = wait(printBlobHistory(localDb, key, version));
return result;
}
}
// can extend to other blobkey commands later
CommandFactory blobKeyFactory("blobkey", CommandHelp("blobkey history <key> [version]", "", ""));
} // namespace fdb_cli
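
printBlobHistory above walks granule history backward, at each step choosing the parent whose range contains the key; parentBoundaries is a fencepost list with one more entry than parentVersions. A minimal sketch of that interval lookup with std::string stand-ins (findParentIndex is a hypothetical name):

#include <string>
#include <vector>

// Given ascending boundaries {b0, ..., bn} with b0 <= key < bn, return i such
// that boundaries[i] <= key < boundaries[i + 1].
int findParentIndex(const std::vector<std::string>& boundaries, const std::string& key) {
	int i;
	for (i = 0; i + 1 < (int)boundaries.size(); i++) {
		if (boundaries[i + 1] > key) {
			break;
		}
	}
	return i;
}

// Example: boundaries {"a", "m", "z"} and key "q" yield i == 1, i.e. the
// parent covering ["m", "z").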

View File

@ -112,7 +112,7 @@ ACTOR Future<bool> blobRangeCommandActor(Database localDb,
end = tokens[3];
}
if (end > LiteralStringRef("\xff")) {
if (end > "\xff"_sr) {
// TODO is this something we want?
fmt::print("Cannot blobbify system keyspace! Problematic End Key: {0}\n", tokens[3].printable());
return false;

View File

@ -1,3 +1,4 @@
include(AddFdbTest)
fdb_find_sources(FDBCLI_SRCS)
add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
@ -23,3 +24,38 @@ if(NOT OPEN_FOR_IDE)
fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/packages/bin/fdbcli DESTINATION bin COMPONENT clients)
endif()
endif()
if (NOT WIN32 AND NOT OPEN_FOR_IDE)
add_dependencies(fdbcli external_client)
add_fdbclient_test(
NAME single_process_fdbcli_tests
COMMAND ${CMAKE_SOURCE_DIR}/fdbcli/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
)
add_fdbclient_test(
NAME multi_process_fdbcli_tests
PROCESS_NUMBER 5
COMMAND ${CMAKE_SOURCE_DIR}/fdbcli/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
5
)
add_fdbclient_test(
NAME single_process_external_client_fdbcli_tests
COMMAND ${CMAKE_SOURCE_DIR}/fdbcli/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c_external.so
)
add_fdbclient_test(
NAME multi_process_external_client_fdbcli_tests
PROCESS_NUMBER 5
COMMAND ${CMAKE_SOURCE_DIR}/fdbcli/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
5
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c_external.so
)
endif()

View File

@ -44,20 +44,20 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
if (tokens.size() < 2)
result = ConfigurationResult::NO_OPTIONS_PROVIDED;
else {
if (tokens[startToken] == LiteralStringRef("FORCE")) {
if (tokens[startToken] == "FORCE"_sr) {
force = true;
startToken = 2;
}
state Optional<ConfigureAutoResult> conf;
if (tokens[startToken] == LiteralStringRef("auto")) {
if (tokens[startToken] == "auto"_sr) {
// get cluster status
state Reference<ITransaction> tr = db->createTransaction();
if (!tr->isValid()) {
StatusObject _s = wait(StatusClient::statusFetcher(localDb));
s = _s;
} else {
state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
state ThreadFuture<Optional<Value>> statusValueF = tr->get("\xff\xff/status/json"_sr);
Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
if (!statusValue.present()) {
fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");
@ -166,7 +166,7 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
case ConfigurationResult::CONFLICTING_OPTIONS:
case ConfigurationResult::UNKNOWN_OPTION:
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
printUsage(LiteralStringRef("configure"));
printUsage("configure"_sr);
ret = false;
break;
case ConfigurationResult::INVALID_CONFIGURATION:

View File

@ -30,7 +30,7 @@
namespace fdb_cli {
const KeyRef consistencyCheckSpecialKey = LiteralStringRef("\xff\xff/management/consistency_check_suspended");
const KeyRef consistencyCheckSpecialKey = "\xff\xff/management/consistency_check_suspended"_sr;
ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
std::vector<StringRef> tokens,

View File

@ -65,8 +65,8 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
state StringRef new_cluster_description;
state std::string auto_coordinators_str;
state bool disableConfigDB = false;
StringRef nameTokenBegin = LiteralStringRef("description=");
StringRef noConfigDB = LiteralStringRef("--no-config-db");
StringRef nameTokenBegin = "description="_sr;
StringRef noConfigDB = "--no-config-db"_sr;
for (auto tok = tokens.begin() + 1; tok != tokens.end(); ++tok) {
if (tok->startsWith(nameTokenBegin) && new_cluster_description.empty()) {
new_cluster_description = tok->substr(nameTokenBegin.size());
@ -83,7 +83,7 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
}
}
state bool automatic = tokens.size() == 2 && tokens[1] == LiteralStringRef("auto");
state bool automatic = tokens.size() == 2 && tokens[1] == "auto"_sr;
state Reference<ITransaction> tr = db->createTransaction();
loop {
tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
@ -186,10 +186,10 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
namespace fdb_cli {
const KeyRef clusterDescriptionSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/cluster_description");
const KeyRef configDBSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/config_db");
const KeyRef coordinatorsAutoSpecialKey = LiteralStringRef("\xff\xff/management/auto_coordinators");
const KeyRef coordinatorsProcessSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/processes");
const KeyRef clusterDescriptionSpecialKey = "\xff\xff/configuration/coordinators/cluster_description"_sr;
const KeyRef configDBSpecialKey = "\xff\xff/configuration/coordinators/config_db"_sr;
const KeyRef coordinatorsAutoSpecialKey = "\xff\xff/management/auto_coordinators"_sr;
const KeyRef coordinatorsProcessSpecialKey = "\xff\xff/configuration/coordinators/processes"_sr;
ACTOR Future<bool> coordinatorsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 2) {

View File

@ -108,8 +108,8 @@ Future<Void> setDDIgnoreRebalanceOff(Reference<IDatabase> db, uint8_t DDIgnoreOp
namespace fdb_cli {
const KeyRef ddModeSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/mode");
const KeyRef ddIgnoreRebalanceSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/rebalance_ignored");
const KeyRef ddModeSpecialKey = "\xff\xff/management/data_distribution/mode"_sr;
const KeyRef ddIgnoreRebalanceSpecialKey = "\xff\xff/management/data_distribution/rebalance_ignored"_sr;
constexpr auto usage =
"Usage: datadistribution <on|off|disable <ssfailure|rebalance|rebalance_disk|rebalance_read>|enable "
"<ssfailure|rebalance|rebalance_disk|rebalance_read>>\n";
@ -127,7 +127,7 @@ ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::ve
printf("Data distribution is turned off.\n");
} else if (tokencmp(tokens[1], "disable")) {
if (tokencmp(tokens[2], "ssfailure")) {
wait(success((setHealthyZone(db, LiteralStringRef("IgnoreSSFailures"), 0))));
wait(success((setHealthyZone(db, "IgnoreSSFailures"_sr, 0))));
printf("Data distribution is disabled for storage server failures.\n");
} else if (tokencmp(tokens[2], "rebalance")) {
wait(setDDIgnoreRebalanceOn(db, DDIgnore::REBALANCE_DISK | DDIgnore::REBALANCE_READ));

View File

@ -227,22 +227,19 @@ ACTOR Future<Void> checkForCoordinators(Reference<IDatabase> db, std::vector<Add
namespace fdb_cli {
const KeyRangeRef excludedServersSpecialKeyRange(LiteralStringRef("\xff\xff/management/excluded/"),
LiteralStringRef("\xff\xff/management/excluded0"));
const KeyRangeRef failedServersSpecialKeyRange(LiteralStringRef("\xff\xff/management/failed/"),
LiteralStringRef("\xff\xff/management/failed0"));
const KeyRangeRef excludedLocalitySpecialKeyRange(LiteralStringRef("\xff\xff/management/excluded_locality/"),
LiteralStringRef("\xff\xff/management/excluded_locality0"));
const KeyRangeRef failedLocalitySpecialKeyRange(LiteralStringRef("\xff\xff/management/failed_locality/"),
LiteralStringRef("\xff\xff/management/failed_locality0"));
const KeyRef excludedForceOptionSpecialKey = LiteralStringRef("\xff\xff/management/options/excluded/force");
const KeyRef failedForceOptionSpecialKey = LiteralStringRef("\xff\xff/management/options/failed/force");
const KeyRef excludedLocalityForceOptionSpecialKey =
LiteralStringRef("\xff\xff/management/options/excluded_locality/force");
const KeyRef failedLocalityForceOptionSpecialKey =
LiteralStringRef("\xff\xff/management/options/failed_locality/force");
const KeyRangeRef exclusionInProgressSpecialKeyRange(LiteralStringRef("\xff\xff/management/in_progress_exclusion/"),
LiteralStringRef("\xff\xff/management/in_progress_exclusion0"));
const KeyRangeRef excludedServersSpecialKeyRange("\xff\xff/management/excluded/"_sr,
"\xff\xff/management/excluded0"_sr);
const KeyRangeRef failedServersSpecialKeyRange("\xff\xff/management/failed/"_sr, "\xff\xff/management/failed0"_sr);
const KeyRangeRef excludedLocalitySpecialKeyRange("\xff\xff/management/excluded_locality/"_sr,
"\xff\xff/management/excluded_locality0"_sr);
const KeyRangeRef failedLocalitySpecialKeyRange("\xff\xff/management/failed_locality/"_sr,
"\xff\xff/management/failed_locality0"_sr);
const KeyRef excludedForceOptionSpecialKey = "\xff\xff/management/options/excluded/force"_sr;
const KeyRef failedForceOptionSpecialKey = "\xff\xff/management/options/failed/force"_sr;
const KeyRef excludedLocalityForceOptionSpecialKey = "\xff\xff/management/options/excluded_locality/force"_sr;
const KeyRef failedLocalityForceOptionSpecialKey = "\xff\xff/management/options/failed_locality/force"_sr;
const KeyRangeRef exclusionInProgressSpecialKeyRange("\xff\xff/management/in_progress_exclusion/"_sr,
"\xff\xff/management/in_progress_exclusion0"_sr);
ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens, Future<Void> warn) {
if (tokens.size() <= 1) {
@ -281,11 +278,11 @@ ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<Stri
if (!result)
return false;
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
if (*t == LiteralStringRef("FORCE")) {
if (*t == "FORCE"_sr) {
force = true;
} else if (*t == LiteralStringRef("no_wait")) {
} else if (*t == "no_wait"_sr) {
waitForAllExcluded = false;
} else if (*t == LiteralStringRef("failed")) {
} else if (*t == "failed"_sr) {
markFailed = true;
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) &&
t->toString().find(':') != std::string::npos) {
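
The exclude parser above treats any token that starts with LocalityData::ExcludeLocalityPrefix and contains ':' as a locality exclusion such as `locality_dcid:dc1` rather than an address. A sketch of splitting such a token, assuming the prefix is the literal "locality_" (parseLocalityToken is a hypothetical name):

#include <cstddef>
#include <string>
#include <utility>

// "locality_dcid:dc1" -> {"dcid", "dc1"}; returns false for plain addresses.
bool parseLocalityToken(const std::string& token, std::pair<std::string, std::string>& out) {
	const std::string prefix = "locality_";
	const std::size_t colon = token.find(':');
	if (token.compare(0, prefix.size(), prefix) != 0 || colon == std::string::npos) {
		return false;
	}
	out = { token.substr(prefix.size(), colon - prefix.size()), token.substr(colon + 1) };
	return true;
}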

View File

@ -78,7 +78,7 @@ ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
name + "=" +
json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
} else {
printUsage(LiteralStringRef("fileconfigure"));
printUsage("fileconfigure"_sr);
return false;
}
}

View File

@ -92,8 +92,7 @@ ACTOR Future<Void> includeServers(Reference<IDatabase> db, std::vector<AddressEx
// This is why we now make two clears: first only of the ip
// address, the second will delete all ports.
if (s.isWholeMachine())
tr->clear(KeyRangeRef(addr.withSuffix(LiteralStringRef(":")),
addr.withSuffix(LiteralStringRef(";"))));
tr->clear(KeyRangeRef(addr.withSuffix(":"_sr), addr.withSuffix(";"_sr)));
}
}
wait(safeThreadFutureToFuture(tr->commit()));
@ -112,9 +111,9 @@ ACTOR Future<bool> include(Reference<IDatabase> db, std::vector<StringRef> token
state bool failed = false;
state bool all = false;
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
if (*t == LiteralStringRef("all")) {
if (*t == "all"_sr) {
all = true;
} else if (*t == LiteralStringRef("failed")) {
} else if (*t == "failed"_sr) {
failed = true;
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) && t->toString().find(':') != std::string::npos) {
// if the token starts with 'locality_' prefix.

View File

@ -59,7 +59,7 @@ ACTOR Future<bool> lockDatabase(Reference<IDatabase> db, UID id) {
namespace fdb_cli {
const KeyRef lockSpecialKey = LiteralStringRef("\xff\xff/management/db_locked");
const KeyRef lockSpecialKey = "\xff\xff/management/db_locked"_sr;
ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 1) {

View File

@ -69,10 +69,10 @@ ACTOR Future<Void> printHealthyZone(Reference<IDatabase> db) {
namespace fdb_cli {
const KeyRangeRef maintenanceSpecialKeyRange = KeyRangeRef(LiteralStringRef("\xff\xff/management/maintenance/"),
LiteralStringRef("\xff\xff/management/maintenance0"));
const KeyRangeRef maintenanceSpecialKeyRange =
KeyRangeRef("\xff\xff/management/maintenance/"_sr, "\xff\xff/management/maintenance0"_sr);
// The special key, if present, means data distribution is disabled for storage failures;
const KeyRef ignoreSSFailureSpecialKey = LiteralStringRef("\xff\xff/management/maintenance/IgnoreSSFailures");
const KeyRef ignoreSSFailureSpecialKey = "\xff\xff/management/maintenance/IgnoreSSFailures"_sr;
// add a zone to maintenance and specify the maintenance duration
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning) {

View File

@ -115,17 +115,13 @@ ACTOR Future<bool> profileCommandActor(Database db,
return false;
}
// Hold the reference to the standalone's memory
state ThreadFuture<RangeResult> kvsFuture =
tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY);
state ThreadFuture<RangeResult> kvsFuture = tr->getRange(
KeyRangeRef("\xff\xff/worker_interfaces/"_sr, "\xff\xff/worker_interfaces0"_sr), CLIENT_KNOBS->TOO_MANY);
RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
ASSERT(!kvs.more);
for (const auto& pair : kvs) {
auto ip_port =
(pair.key.endsWith(LiteralStringRef(":tls")) ? pair.key.removeSuffix(LiteralStringRef(":tls"))
: pair.key)
.removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
auto ip_port = (pair.key.endsWith(":tls"_sr) ? pair.key.removeSuffix(":tls"_sr) : pair.key)
.removePrefix("\xff\xff/worker_interfaces/"_sr);
printf("%s\n", printable(ip_port).c_str());
}
} else {

View File

@ -25,8 +25,6 @@ namespace {
enum class LimitType { RESERVED, TOTAL };
enum class OpType { READ, WRITE };
Optional<TransactionTag> parseTag(StringRef token) {
if (token.size() > CLIENT_KNOBS->MAX_TRANSACTION_TAG_LENGTH) {
return {};
@ -36,25 +34,15 @@ Optional<TransactionTag> parseTag(StringRef token) {
}
Optional<LimitType> parseLimitType(StringRef token) {
if (token == "reserved"_sr) {
if (token == "reserved_throughput"_sr) {
return LimitType::RESERVED;
} else if (token == "total"_sr) {
} else if (token == "total_throughput"_sr) {
return LimitType::TOTAL;
} else {
return {};
}
}
Optional<OpType> parseOpType(StringRef token) {
if (token == "read"_sr) {
return OpType::READ;
} else if (token == "write"_sr) {
return OpType::WRITE;
} else {
return {};
}
}
Optional<double> parseLimitValue(StringRef token) {
try {
return std::stod(token.toString());
@ -63,7 +51,7 @@ Optional<double> parseLimitValue(StringRef token) {
}
}
ACTOR Future<Void> getQuota(Reference<IDatabase> db, TransactionTag tag, LimitType limitType, OpType opType) {
ACTOR Future<Void> getQuota(Reference<IDatabase> db, TransactionTag tag, LimitType limitType) {
state Reference<ITransaction> tr = db->createTransaction();
loop {
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
@ -74,14 +62,10 @@ ACTOR Future<Void> getQuota(Reference<IDatabase> db, TransactionTag tag, LimitTy
fmt::print("<empty>\n");
} else {
auto const quota = ThrottleApi::TagQuotaValue::fromValue(v.get());
if (limitType == LimitType::TOTAL && opType == OpType::READ) {
fmt::print("{}\n", quota.totalReadQuota);
} else if (limitType == LimitType::TOTAL && opType == OpType::WRITE) {
fmt::print("{}\n", quota.totalWriteQuota);
} else if (limitType == LimitType::RESERVED && opType == OpType::READ) {
fmt::print("{}\n", quota.reservedReadQuota);
} else if (limitType == LimitType::RESERVED && opType == OpType::WRITE) {
fmt::print("{}\n", quota.reservedWriteQuota);
if (limitType == LimitType::TOTAL) {
fmt::print("{}\n", quota.totalQuota * CLIENT_KNOBS->READ_COST_BYTE_FACTOR);
} else if (limitType == LimitType::RESERVED) {
fmt::print("{}\n", quota.reservedQuota * CLIENT_KNOBS->READ_COST_BYTE_FACTOR);
}
}
return Void();
@ -91,11 +75,7 @@ ACTOR Future<Void> getQuota(Reference<IDatabase> db, TransactionTag tag, LimitTy
}
}
ACTOR Future<Void> setQuota(Reference<IDatabase> db,
TransactionTag tag,
LimitType limitType,
OpType opType,
double value) {
ACTOR Future<Void> setQuota(Reference<IDatabase> db, TransactionTag tag, LimitType limitType, double value) {
state Reference<ITransaction> tr = db->createTransaction();
state Key key = tag.withPrefix(tagQuotaPrefix);
loop {
@ -107,21 +87,14 @@ ACTOR Future<Void> setQuota(Reference<IDatabase> db,
if (v.present()) {
quota = ThrottleApi::TagQuotaValue::fromValue(v.get());
}
if (limitType == LimitType::TOTAL && opType == OpType::READ) {
quota.totalReadQuota = value;
} else if (limitType == LimitType::TOTAL && opType == OpType::WRITE) {
quota.totalWriteQuota = value;
} else if (limitType == LimitType::RESERVED && opType == OpType::READ) {
quota.reservedReadQuota = value;
} else if (limitType == LimitType::RESERVED && opType == OpType::WRITE) {
quota.reservedWriteQuota = value;
// Internally, costs are stored in terms of pages, but in the API,
// costs are specified in terms of bytes
if (limitType == LimitType::TOTAL) {
quota.totalQuota = (value - 1) / CLIENT_KNOBS->READ_COST_BYTE_FACTOR + 1;
} else if (limitType == LimitType::RESERVED) {
quota.reservedQuota = (value - 1) / CLIENT_KNOBS->READ_COST_BYTE_FACTOR + 1;
}
ThrottleApi::setTagQuota(tr,
tag,
quota.reservedReadQuota,
quota.totalReadQuota,
quota.reservedWriteQuota,
quota.totalWriteQuota);
ThrottleApi::setTagQuota(tr, tag, quota.reservedQuota, quota.totalQuota);
wait(safeThreadFutureToFuture(tr->commit()));
return Void();
} catch (Error& e) {
@ -130,8 +103,8 @@ ACTOR Future<Void> setQuota(Reference<IDatabase> db,
}
}
constexpr auto usage =
"quota [get <tag> [reserved|total] [read|write]|set <tag> [reserved|total] [read|write] <value>]";
constexpr auto usage = "quota [get <tag> [reserved_throughput|total_throughput] | set <tag> "
"[reserved_throughput|total_throughput] <value>]";
bool exitFailure() {
fmt::print(usage);
@ -149,25 +122,24 @@ ACTOR Future<bool> quotaCommandActor(Reference<IDatabase> db, std::vector<String
} else {
auto tag = parseTag(tokens[2]);
auto limitType = parseLimitType(tokens[3]);
auto opType = parseOpType(tokens[4]);
if (!tag.present() || !limitType.present() || !opType.present()) {
if (!tag.present() || !limitType.present()) {
return exitFailure();
}
if (tokens[1] == "get"_sr) {
if (tokens.size() != 4) {
return exitFailure();
}
wait(getQuota(db, tag.get(), limitType.get()));
return true;
} else if (tokens[1] == "set"_sr) {
if (tokens.size() != 5) {
return exitFailure();
}
wait(getQuota(db, tag.get(), limitType.get(), opType.get()));
return true;
} else if (tokens[1] == "set"_sr) {
if (tokens.size() != 6) {
return exitFailure();
}
auto const limitValue = parseLimitValue(tokens[5]);
auto const limitValue = parseLimitValue(tokens[4]);
if (!limitValue.present()) {
return exitFailure();
}
wait(setQuota(db, tag.get(), limitType.get(), opType.get(), limitValue.get()));
wait(setQuota(db, tag.get(), limitType.get(), limitValue.get()));
return true;
} else {
return exitFailure();
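
setQuota above stores quotas in pages while the CLI takes bytes, rounding up with (value - 1) / FACTOR + 1; getQuota multiplies by the same factor on the way out. A small sketch of the ceiling conversion, assuming a hypothetical factor of 16384 bytes per page (the real READ_COST_BYTE_FACTOR knob may differ):

#include <cstdint>
#include <cstdio>

// Round a byte count up to whole pages; assumes bytes >= 1.
int64_t bytesToPages(int64_t bytes, int64_t bytesPerPage) {
	return (bytes - 1) / bytesPerPage + 1;
}

int main() {
	const int64_t factor = 16384; // hypothetical stand-in for READ_COST_BYTE_FACTOR
	std::printf("%lld\n", (long long)bytesToPages(1, factor)); // 1
	std::printf("%lld\n", (long long)bytesToPages(16384, factor)); // 1
	std::printf("%lld\n", (long long)bytesToPages(16385, factor)); // 2
	return 0;
}

Under the new syntax, `quota set myTag total_throughput 16385` (myTag being an example tag) would therefore be stored as two pages.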

View File

@ -105,12 +105,10 @@ ACTOR Future<bool> setProcessClass(Reference<IDatabase> db, KeyRef network_addre
namespace fdb_cli {
const KeyRangeRef processClassSourceSpecialKeyRange =
KeyRangeRef(LiteralStringRef("\xff\xff/configuration/process/class_source/"),
LiteralStringRef("\xff\xff/configuration/process/class_source0"));
KeyRangeRef("\xff\xff/configuration/process/class_source/"_sr, "\xff\xff/configuration/process/class_source0"_sr);
const KeyRangeRef processClassTypeSpecialKeyRange =
KeyRangeRef(LiteralStringRef("\xff\xff/configuration/process/class_type/"),
LiteralStringRef("\xff\xff/configuration/process/class_type0"));
KeyRangeRef("\xff\xff/configuration/process/class_type/"_sr, "\xff\xff/configuration/process/class_type0"_sr);
ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 3 && tokens.size() != 1) {

View File

@ -40,7 +40,7 @@ ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<Str
for (int i = 1; i < tokens.size(); i++) {
snap_cmd = snap_cmd.withSuffix(tokens[i]);
if (i != tokens.size() - 1) {
snap_cmd = snap_cmd.withSuffix(LiteralStringRef(" "));
snap_cmd = snap_cmd.withSuffix(" "_sr);
}
}
try {

View File

@ -1256,7 +1256,7 @@ ACTOR Future<bool> statusCommandActor(Reference<IDatabase> db,
StatusObject _s = wait(StatusClient::statusFetcher(localDb));
s = _s;
} else {
state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
state ThreadFuture<Optional<Value>> statusValueF = tr->get("\xff\xff/status/json"_sr);
Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
if (!statusValue.present()) {
fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");

View File

@ -76,6 +76,8 @@ parseTenantConfiguration(std::vector<StringRef> const& tokens, int startIndex, b
if (tokencmp(param, "tenant_group")) {
configParams[param] = value;
} else if (tokencmp(param, "assigned_cluster")) {
configParams[param] = value;
} else {
fmt::print(stderr, "ERROR: unrecognized configuration parameter `{}'.\n", param.toString().c_str());
return {};
@ -93,6 +95,10 @@ void applyConfigurationToSpecialKeys(Reference<ITransaction> tr,
TenantNameRef tenantName,
std::map<Standalone<StringRef>, Optional<Value>> configuration) {
for (auto [configName, value] : configuration) {
if (configName == "assigned_cluster"_sr) {
fmt::print(stderr, "ERROR: assigned_cluster is only valid in metacluster configuration.\n");
throw invalid_tenant_configuration();
}
if (value.present()) {
tr->set(makeConfigKey(tenantName, configName), value.get());
} else {
@ -101,19 +107,23 @@ void applyConfigurationToSpecialKeys(Reference<ITransaction> tr,
}
}
// createtenant command
ACTOR Future<bool> createTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 2 || tokens.size() > 3) {
printUsage(tokens[0]);
// tenant create command
ACTOR Future<bool> tenantCreateCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 3 || tokens.size() > 5) {
fmt::print("Usage: tenant create <NAME> [tenant_group=<TENANT_GROUP>] [assigned_cluster=<CLUSTER_NAME>]\n\n");
fmt::print("Creates a new tenant in the cluster with the specified name.\n");
fmt::print("An optional group can be specified that will require this tenant\n");
fmt::print("to be placed on the same cluster as other tenants in the same group.\n");
fmt::print("An optional cluster name can be specified that this tenant will be placed in.\n");
return false;
}
state Key tenantNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[1]);
state Key tenantNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[2]);
state Reference<ITransaction> tr = db->createTransaction();
state bool doneExistenceCheck = false;
state Optional<std::map<Standalone<StringRef>, Optional<Value>>> configuration =
parseTenantConfiguration(tokens, 2, false);
parseTenantConfiguration(tokens, 3, false);
if (!configuration.present()) {
return false;
@ -129,7 +139,7 @@ ACTOR Future<bool> createTenantCommandActor(Reference<IDatabase> db, std::vector
for (auto const& [name, value] : configuration.get()) {
tenantEntry.configure(name, value);
}
wait(MetaclusterAPI::createTenant(db, tokens[1], tenantEntry));
wait(MetaclusterAPI::createTenant(db, tokens[2], tenantEntry));
} else {
if (!doneExistenceCheck) {
// Hold the reference to the standalone's memory
@ -142,7 +152,7 @@ ACTOR Future<bool> createTenantCommandActor(Reference<IDatabase> db, std::vector
}
tr->set(tenantNameKey, ValueRef());
applyConfigurationToSpecialKeys(tr, tokens[1], configuration.get());
applyConfigurationToSpecialKeys(tr, tokens[2], configuration.get());
wait(safeThreadFutureToFuture(tr->commit()));
}
@ -158,25 +168,20 @@ ACTOR Future<bool> createTenantCommandActor(Reference<IDatabase> db, std::vector
}
}
fmt::print("The tenant `{}' has been created\n", printable(tokens[1]).c_str());
fmt::print("The tenant `{}' has been created\n", printable(tokens[2]).c_str());
return true;
}
CommandFactory createTenantFactory(
"createtenant",
CommandHelp("createtenant <TENANT_NAME> [tenant_group=<TENANT_GROUP>]",
"creates a new tenant in the cluster",
"Creates a new tenant in the cluster with the specified name. An optional group can be specified"
"that will require this tenant to be placed on the same cluster as other tenants in the same group."));
// deletetenant command
ACTOR Future<bool> deleteTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 2) {
printUsage(tokens[0]);
// tenant delete command
ACTOR Future<bool> tenantDeleteCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 3) {
fmt::print("Usage: tenant delete <NAME>\n\n");
fmt::print("Deletes a tenant from the cluster.\n");
fmt::print("Deletion will be allowed only if the specified tenant contains no data.\n");
return false;
}
state Key tenantNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[1]);
state Key tenantNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[2]);
state Reference<ITransaction> tr = db->createTransaction();
state bool doneExistenceCheck = false;
@ -186,7 +191,7 @@ ACTOR Future<bool> deleteTenantCommandActor(Reference<IDatabase> db, std::vector
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
wait(MetaclusterAPI::deleteTenant(db, tokens[1]));
wait(MetaclusterAPI::deleteTenant(db, tokens[2]));
} else {
if (!doneExistenceCheck) {
// Hold the reference to the standalone's memory
@ -214,21 +219,17 @@ ACTOR Future<bool> deleteTenantCommandActor(Reference<IDatabase> db, std::vector
}
}
fmt::print("The tenant `{}' has been deleted\n", printable(tokens[1]).c_str());
fmt::print("The tenant `{}' has been deleted\n", printable(tokens[2]).c_str());
return true;
}
CommandFactory deleteTenantFactory(
"deletetenant",
CommandHelp(
"deletetenant <TENANT_NAME>",
"deletes a tenant from the cluster",
"Deletes a tenant from the cluster. Deletion will be allowed only if the specified tenant contains no data."));
// listtenants command
ACTOR Future<bool> listTenantsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() > 4) {
printUsage(tokens[0]);
// tenant list command
ACTOR Future<bool> tenantListCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() > 5) {
fmt::print("Usage: tenant list [BEGIN] [END] [LIMIT]\n\n");
fmt::print("Lists the tenants in a cluster.\n");
fmt::print("Only tenants in the range BEGIN - END will be printed.\n");
fmt::print("An optional LIMIT can be specified to limit the number of results (default 100).\n");
return false;
}
@ -236,20 +237,20 @@ ACTOR Future<bool> listTenantsCommandActor(Reference<IDatabase> db, std::vector<
state StringRef endTenant = "\xff\xff"_sr;
state int limit = 100;
if (tokens.size() >= 2) {
beginTenant = tokens[1];
}
if (tokens.size() >= 3) {
endTenant = tokens[2];
beginTenant = tokens[2];
}
if (tokens.size() >= 4) {
endTenant = tokens[3];
if (endTenant <= beginTenant) {
fmt::print(stderr, "ERROR: end must be larger than begin\n");
return false;
}
}
if (tokens.size() == 4) {
if (tokens.size() == 5) {
int n = 0;
if (sscanf(tokens[3].toString().c_str(), "%d%n", &limit, &n) != 1 || n != tokens[3].size() || limit <= 0) {
fmt::print(stderr, "ERROR: invalid limit `{}'\n", tokens[3].toString().c_str());
if (sscanf(tokens[4].toString().c_str(), "%d%n", &limit, &n) != 1 || n != tokens[4].size() || limit <= 0) {
fmt::print(stderr, "ERROR: invalid limit `{}'\n", tokens[4].toString().c_str());
return false;
}
}
@ -262,7 +263,7 @@ ACTOR Future<bool> listTenantsCommandActor(Reference<IDatabase> db, std::vector<
try {
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
state std::vector<TenantNameRef> tenantNames;
state std::vector<TenantName> tenantNames;
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
std::vector<std::pair<TenantName, TenantMapEntry>> tenants =
wait(MetaclusterAPI::listTenantsTransaction(tr, beginTenant, endTenant, limit));
@ -280,7 +281,7 @@ ACTOR Future<bool> listTenantsCommandActor(Reference<IDatabase> db, std::vector<
}
if (tenantNames.empty()) {
if (tokens.size() == 1) {
if (tokens.size() == 2) {
fmt::print("The cluster has no tenants\n");
} else {
fmt::print("The cluster has no tenants in the specified range\n");
@ -305,22 +306,17 @@ ACTOR Future<bool> listTenantsCommandActor(Reference<IDatabase> db, std::vector<
}
}
CommandFactory listTenantsFactory(
"listtenants",
CommandHelp("listtenants [BEGIN] [END] [LIMIT]",
"print a list of tenants in the cluster",
"Print a list of tenants in the cluster. Only tenants in the range [BEGIN] - [END] will be printed. "
"The number of tenants to print can be specified using the [LIMIT] parameter, which defaults to 100."));
// gettenant command
ACTOR Future<bool> getTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 2 || tokens.size() > 3 || (tokens.size() == 3 && tokens[2] != "JSON"_sr)) {
printUsage(tokens[0]);
// tenant get command
ACTOR Future<bool> tenantGetCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 3 || tokens.size() > 4 || (tokens.size() == 4 && tokens[3] != "JSON"_sr)) {
fmt::print("Usage: tenant get <NAME> [JSON]\n\n");
fmt::print("Prints metadata associated with the given tenant.\n");
fmt::print("If JSON is specified, then the output will be in JSON format.\n");
return false;
}
state bool useJson = tokens.size() == 3;
state Key tenantNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[1]);
state bool useJson = tokens.size() == 4;
state Key tenantNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[2]);
state Reference<ITransaction> tr = db->createTransaction();
loop {
@ -329,7 +325,7 @@ ACTOR Future<bool> getTenantCommandActor(Reference<IDatabase> db, std::vector<St
state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
state std::string tenantJson;
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
TenantMapEntry entry = wait(MetaclusterAPI::getTenantTransaction(tr, tokens[1]));
TenantMapEntry entry = wait(MetaclusterAPI::getTenantTransaction(tr, tokens[2]));
tenantJson = entry.toJson();
} else {
// Hold the reference to the standalone's memory
@ -409,21 +405,19 @@ ACTOR Future<bool> getTenantCommandActor(Reference<IDatabase> db, std::vector<St
}
}
CommandFactory getTenantFactory(
"gettenant",
CommandHelp("gettenant <TENANT_NAME> [JSON]",
"prints the metadata for a tenant",
"Prints the metadata for a tenant. If JSON is specified, then the output will be in JSON format."));
// configuretenant command
ACTOR Future<bool> configureTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 3) {
printUsage(tokens[0]);
// tenant configure command
ACTOR Future<bool> tenantConfigureCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() < 4) {
fmt::print("Usage: tenant configure <TENANT_NAME> <[unset] tenant_group[=<GROUP_NAME>]> ...\n\n");
fmt::print("Updates the configuration for a tenant.\n");
fmt::print("Use `tenant_group=<GROUP_NAME>' to change the tenant group that a\n");
fmt::print("tenant is assigned to or `unset tenant_group' to remove a tenant from\n");
fmt::print("its tenant group.\n");
return false;
}
state Optional<std::map<Standalone<StringRef>, Optional<Value>>> configuration =
parseTenantConfiguration(tokens, 2, true);
parseTenantConfiguration(tokens, 3, true);
if (!configuration.present()) {
return false;
@ -438,9 +432,9 @@ ACTOR Future<bool> configureTenantCommandActor(Reference<IDatabase> db, std::vec
ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
TenantMapEntry tenantEntry;
wait(MetaclusterAPI::configureTenant(db, tokens[1], configuration.get()));
wait(MetaclusterAPI::configureTenant(db, tokens[2], configuration.get()));
} else {
applyConfigurationToSpecialKeys(tr, tokens[1], configuration.get());
applyConfigurationToSpecialKeys(tr, tokens[2], configuration.get());
wait(safeThreadFutureToFuture(tr->commit()));
}
break;
@ -455,17 +449,10 @@ ACTOR Future<bool> configureTenantCommandActor(Reference<IDatabase> db, std::vec
}
}
fmt::print("The configuration for tenant `{}' has been updated\n", printable(tokens[1]).c_str());
fmt::print("The configuration for tenant `{}' has been updated\n", printable(tokens[2]).c_str());
return true;
}
CommandFactory configureTenantFactory(
"configuretenant",
CommandHelp("configuretenant <TENANT_NAME> <[unset] tenant_group[=<GROUP_NAME>]> ...",
"updates the configuration for a tenant",
"Updates the configuration for a tenant. Use `tenant_group=<GROUP_NAME>' to change the tenant group "
"that a tenant is assigned to or `unset tenant_group' to remove a tenant from its tenant group."));
// Helper function to extract tenant ID from json metadata string
int64_t getTenantId(Value metadata) {
json_spirit::mValue jsonObject;
@ -476,16 +463,18 @@ int64_t getTenantId(Value metadata) {
return id;
}
// renametenant command
ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 3) {
printUsage(tokens[0]);
// tenant rename command
ACTOR Future<bool> tenantRenameCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 4) {
fmt::print("Usage: tenant rename <OLD_NAME> <NEW_NAME>\n\n");
fmt::print("Renames a tenant in the cluster. The old name must exist and the new\n");
fmt::print("name must not exist in the cluster.\n");
return false;
}
state Reference<ITransaction> tr = db->createTransaction();
state Key tenantRenameKey = tenantRenameSpecialKeyRange.begin.withSuffix(tokens[1]);
state Key tenantOldNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[1]);
state Key tenantNewNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[2]);
state Key tenantRenameKey = tenantRenameSpecialKeyRange.begin.withSuffix(tokens[2]);
state Key tenantOldNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[2]);
state Key tenantNewNameKey = tenantMapSpecialKeyRange.begin.withSuffix(tokens[3]);
state bool firstTry = true;
state int64_t id = -1;
loop {
@ -494,7 +483,7 @@ ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
wait(MetaclusterAPI::renameTenant(db, tokens[1], tokens[2]));
wait(MetaclusterAPI::renameTenant(db, tokens[2], tokens[3]));
} else {
// Hold the reference to the standalone's memory
state ThreadFuture<Optional<Value>> oldEntryFuture = tr->get(tenantOldNameKey);
@ -534,7 +523,7 @@ ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector
throw tenant_not_found();
}
}
tr->set(tenantRenameKey, tokens[2]);
tr->set(tenantRenameKey, tokens[3]);
wait(safeThreadFutureToFuture(tr->commit()));
}
break;
@ -550,14 +539,118 @@ ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector
}
fmt::print(
"The tenant `{}' has been renamed to `{}'\n", printable(tokens[1]).c_str(), printable(tokens[2]).c_str());
"The tenant `{}' has been renamed to `{}'\n", printable(tokens[2]).c_str(), printable(tokens[3]).c_str());
return true;
}
CommandFactory renameTenantFactory(
"renametenant",
CommandHelp(
"renametenant <OLD_NAME> <NEW_NAME>",
"renames a tenant in the cluster",
"Renames a tenant in the cluster. The old name must exist and the new name must not exist in the cluster."));
// tenant command
Future<bool> tenantCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() == 1) {
printUsage(tokens[0]);
return true;
} else if (tokencmp(tokens[1], "create")) {
return tenantCreateCommand(db, tokens);
} else if (tokencmp(tokens[1], "delete")) {
return tenantDeleteCommand(db, tokens);
} else if (tokencmp(tokens[1], "list")) {
return tenantListCommand(db, tokens);
} else if (tokencmp(tokens[1], "get")) {
return tenantGetCommand(db, tokens);
} else if (tokencmp(tokens[1], "configure")) {
return tenantConfigureCommand(db, tokens);
} else if (tokencmp(tokens[1], "rename")) {
return tenantRenameCommand(db, tokens);
} else {
printUsage(tokens[0]);
return true;
}
}
Future<bool> tenantCommandForwarder(Reference<IDatabase> db, std::vector<StringRef> tokens) {
ASSERT(!tokens.empty() && (tokens[0].endsWith("tenant"_sr) || tokens[0].endsWith("tenants"_sr)));
std::vector<StringRef> forwardedTokens = { "tenant"_sr,
tokens[0].endsWith("tenant"_sr) ? tokens[0].removeSuffix("tenant"_sr)
: tokens[0].removeSuffix("tenants"_sr) };
for (int i = 1; i < tokens.size(); ++i) {
forwardedTokens.push_back(tokens[i]);
}
return tenantCommand(db, forwardedTokens);
} // namespace fdb_cli
void tenantGenerator(const char* text,
const char* line,
std::vector<std::string>& lc,
std::vector<StringRef> const& tokens) {
if (tokens.size() == 1) {
const char* opts[] = { "create", "delete", "list", "get", "configure", "rename", nullptr };
arrayGenerator(text, line, opts, lc);
} else if (tokens.size() == 3 && tokencmp(tokens[1], "create")) {
const char* opts[] = { "tenant_group=", nullptr };
arrayGenerator(text, line, opts, lc);
} else if (tokens.size() == 3 && tokencmp(tokens[1], "get")) {
const char* opts[] = { "JSON", nullptr };
arrayGenerator(text, line, opts, lc);
} else if (tokencmp(tokens[1], "configure")) {
if (tokens.size() == 3) {
const char* opts[] = { "tenant_group=", "unset", nullptr };
arrayGenerator(text, line, opts, lc);
} else if (tokens.size() == 4 && tokencmp(tokens[3], "unset")) {
const char* opts[] = { "tenant_group", nullptr };
arrayGenerator(text, line, opts, lc);
}
}
}
std::vector<const char*> tenantHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
if (tokens.size() == 1) {
return { "<create|delete|list|get|configure|rename>", "[ARGS]" };
} else if (tokencmp(tokens[1], "create") && tokens.size() < 4) {
static std::vector<const char*> opts = { "<NAME> [tenant_group=<TENANT_GROUP>]" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else if (tokencmp(tokens[1], "delete") && tokens.size() < 3) {
static std::vector<const char*> opts = { "<NAME>" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else if (tokencmp(tokens[1], "list") && tokens.size() < 5) {
static std::vector<const char*> opts = { "[BEGIN]", "[END]", "[LIMIT]" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else if (tokencmp(tokens[1], "get") && tokens.size() < 4) {
static std::vector<const char*> opts = { "<NAME>", "[JSON]" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else if (tokencmp(tokens[1], "configure")) {
if (tokens.size() < 4) {
static std::vector<const char*> opts = { "<TENANT_NAME>", "<[unset] tenant_group[=<GROUP_NAME>]>" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else if (tokens.size() == 4 && tokencmp(tokens[3], "unset")) {
static std::vector<const char*> opts = { "<tenant_group[=<GROUP_NAME>]>" };
return std::vector<const char*>(opts.begin() + tokens.size() - 4, opts.end());
}
return {};
} else if (tokencmp(tokens[1], "rename") && tokens.size() < 4) {
static std::vector<const char*> opts = { "<OLD_NAME>", "<NEW_NAME>" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else {
return {};
}
}
CommandFactory tenantRegisterFactory("tenant",
CommandHelp("tenant <create|delete|list|get|configure|rename> [ARGS]",
"view and manage tenants in a cluster or metacluster",
"`create' and `delete' add and remove tenants from the cluster.\n"
"`list' prints a list of tenants in the cluster.\n"
"`get' prints the metadata for a particular tenant.\n"
"`configure' modifies the configuration for a tenant.\n"
"`rename' changes the name of a tenant.\n"),
&tenantGenerator,
&tenantHintGenerator);
// Generate hidden commands for the old versions of the tenant commands
CommandFactory createTenantFactory("createtenant");
CommandFactory deleteTenantFactory("deletetenant");
CommandFactory listTenantsFactory("listtenants");
CommandFactory getTenantFactory("gettenant");
CommandFactory configureTenantFactory("configuretenant");
CommandFactory renameTenantFactory("renametenant");
} // namespace fdb_cli
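
tenantCommandForwarder above maps a legacy command such as `createtenant foo` onto the new `tenant create foo` form by stripping the trailing "tenant"/"tenants" from the command name and prepending "tenant". A self-contained sketch with std::string (forwardTenantTokens is a hypothetical name):

#include <string>
#include <vector>

// {"createtenant", "foo"} -> {"tenant", "create", "foo"}
// {"listtenants"}         -> {"tenant", "list"}
std::vector<std::string> forwardTenantTokens(std::vector<std::string> tokens) {
	const std::string plural = "tenants", singular = "tenant";
	std::string& cmd = tokens[0];
	if (cmd.size() > plural.size() && cmd.compare(cmd.size() - plural.size(), plural.size(), plural) == 0) {
		cmd.erase(cmd.size() - plural.size());
	} else if (cmd.size() > singular.size() &&
	           cmd.compare(cmd.size() - singular.size(), singular.size(), singular) == 0) {
		cmd.erase(cmd.size() - singular.size());
	}
	tokens.insert(tokens.begin(), "tenant");
	return tokens;
}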

View File

@ -0,0 +1,240 @@
/*
* TenantGroupCommands.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/GenericManagementAPI.actor.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/MetaclusterManagement.actor.h"
#include "fdbclient/TenantManagement.actor.h"
#include "fdbclient/Schemas.h"
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace fdb_cli {
// tenantgroup list command
ACTOR Future<bool> tenantGroupListCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() > 5) {
fmt::print("Usage: tenantgroup list [BEGIN] [END] [LIMIT]\n\n");
fmt::print("Lists the tenant groups in a cluster.\n");
fmt::print("Only tenant groups in the range BEGIN - END will be printed.\n");
fmt::print("An optional LIMIT can be specified to limit the number of results (default 100).\n");
return false;
}
state StringRef beginTenantGroup = ""_sr;
state StringRef endTenantGroup = "\xff\xff"_sr;
state int limit = 100;
if (tokens.size() >= 3) {
beginTenantGroup = tokens[2];
}
if (tokens.size() >= 4) {
endTenantGroup = tokens[3];
if (endTenantGroup <= beginTenantGroup) {
fmt::print(stderr, "ERROR: end must be larger than begin\n");
return false;
}
}
if (tokens.size() == 5) {
int n = 0;
if (sscanf(tokens[4].toString().c_str(), "%d%n", &limit, &n) != 1 || n != tokens[4].size() || limit <= 0) {
fmt::print(stderr, "ERROR: invalid limit `{}'\n", tokens[4].toString());
return false;
}
}
state Reference<ITransaction> tr = db->createTransaction();
loop {
try {
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
state std::vector<TenantGroupName> tenantGroupNames;
state std::vector<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroups;
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
wait(store(tenantGroups,
MetaclusterAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit)));
} else {
wait(store(tenantGroups,
TenantAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit)));
}
if (tenantGroups.empty()) {
if (tokens.size() == 2) {
fmt::print("The cluster has no tenant groups\n");
} else {
fmt::print("The cluster has no tenant groups in the specified range\n");
}
}
int index = 0;
for (auto tenantGroup : tenantGroups) {
fmt::print(" {}. {}\n", ++index, printable(tenantGroup.first));
}
return true;
} catch (Error& e) {
wait(safeThreadFutureToFuture(tr->onError(e)));
}
}
}
// tenantgroup get command
ACTOR Future<bool> tenantGroupGetCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() > 4 || (tokens.size() == 4 && tokens[3] != "JSON"_sr)) {
fmt::print("Usage: tenantgroup get <NAME> [JSON]\n\n");
fmt::print("Prints metadata associated with the given tenant group.\n");
fmt::print("If JSON is specified, then the output will be in JSON format.\n");
return false;
}
state bool useJson = tokens.size() == 4;
state Reference<ITransaction> tr = db->createTransaction();
loop {
try {
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
state std::string tenantJson;
state Optional<TenantGroupEntry> entry;
if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
wait(store(entry, MetaclusterAPI::tryGetTenantGroupTransaction(tr, tokens[2])));
} else {
wait(store(entry, TenantAPI::tryGetTenantGroupTransaction(tr, tokens[2])));
Optional<MetaclusterRegistrationEntry> metaclusterRegistration =
wait(MetaclusterMetadata::metaclusterRegistration().get(tr));
// We don't store assigned clusters in the tenant group entry on data clusters, so we can instead
// populate it from the metacluster registration
if (entry.present() && metaclusterRegistration.present() &&
metaclusterRegistration.get().clusterType == ClusterType::METACLUSTER_DATA &&
!entry.get().assignedCluster.present()) {
entry.get().assignedCluster = metaclusterRegistration.get().name;
}
}
if (!entry.present()) {
throw tenant_not_found();
}
if (useJson) {
json_spirit::mObject resultObj;
resultObj["tenant_group"] = entry.get().toJson();
resultObj["type"] = "success";
fmt::print("{}\n",
json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print));
} else {
if (entry.get().assignedCluster.present()) {
fmt::print(" assigned cluster: {}\n", printable(entry.get().assignedCluster));
} else {
// This is a placeholder output for when a tenant group is read in a non-metacluster, where
// it currently has no metadata. When metadata is eventually added, we can print that instead.
fmt::print("The tenant group is present in the cluster\n");
}
}
return true;
} catch (Error& e) {
try {
wait(safeThreadFutureToFuture(tr->onError(e)));
} catch (Error& finalErr) {
state std::string errorStr;
if (finalErr.code() == error_code_tenant_not_found) {
errorStr = "tenant group not found";
} else if (useJson) {
errorStr = finalErr.what();
} else {
throw finalErr;
}
if (useJson) {
json_spirit::mObject resultObj;
resultObj["type"] = "error";
resultObj["error"] = errorStr;
fmt::print("{}\n",
json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print));
} else {
fmt::print(stderr, "ERROR: {}\n", errorStr);
}
return false;
}
}
}
}
// tenantgroup command
Future<bool> tenantGroupCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() == 1) {
printUsage(tokens[0]);
return true;
} else if (tokencmp(tokens[1], "list")) {
return tenantGroupListCommand(db, tokens);
} else if (tokencmp(tokens[1], "get")) {
return tenantGroupGetCommand(db, tokens);
} else {
printUsage(tokens[0]);
return true;
}
}
void tenantGroupGenerator(const char* text,
const char* line,
std::vector<std::string>& lc,
std::vector<StringRef> const& tokens) {
if (tokens.size() == 1) {
const char* opts[] = { "list", "get", nullptr };
arrayGenerator(text, line, opts, lc);
} else if (tokens.size() == 3 && tokencmp(tokens[1], "get")) {
const char* opts[] = { "JSON", nullptr };
arrayGenerator(text, line, opts, lc);
}
}
std::vector<const char*> tenantGroupHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
if (tokens.size() == 1) {
return { "<list|get>", "[ARGS]" };
} else if (tokencmp(tokens[1], "list") && tokens.size() < 5) {
static std::vector<const char*> opts = { "[BEGIN]", "[END]", "[LIMIT]" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else if (tokencmp(tokens[1], "get") && tokens.size() < 4) {
static std::vector<const char*> opts = { "<NAME>", "[JSON]" };
return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
} else {
return {};
}
}
CommandFactory tenantGroupRegisterFactory("tenantgroup",
CommandHelp("tenantgroup <list|get> [ARGS]",
"view tenant group information",
"`list' prints a list of tenant groups in the cluster.\n"
"`get' prints the metadata for a particular tenant group.\n"),
&tenantGroupGenerator,
&tenantGroupHintGenerator);
} // namespace fdb_cli
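
Both list commands above validate their LIMIT argument with the sscanf "%d%n" idiom: %n records how many characters were consumed, so a partially numeric token such as "100x" is rejected. A sketch (parseLimit is a hypothetical name):

#include <cstdio>
#include <string>

// Returns true only if the whole token is a positive decimal integer.
bool parseLimit(const std::string& token, int& limit) {
	int n = 0;
	if (std::sscanf(token.c_str(), "%d%n", &limit, &n) != 1 || n != (int)token.size() || limit <= 0) {
		return false; // "100x", "-5", and "" all fail
	}
	return true; // "100" succeeds with limit == 100
}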

View File

@ -163,11 +163,11 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
}
}
if (tokens.size() == 7) {
if (tokens[6] == LiteralStringRef("default")) {
if (tokens[6] == "default"_sr) {
priority = TransactionPriority::DEFAULT;
} else if (tokens[6] == LiteralStringRef("immediate")) {
} else if (tokens[6] == "immediate"_sr) {
priority = TransactionPriority::IMMEDIATE;
} else if (tokens[6] == LiteralStringRef("batch")) {
} else if (tokens[6] == "batch"_sr) {
priority = TransactionPriority::BATCH;
} else {
fprintf(stderr,

View File

@ -89,7 +89,7 @@ ACTOR Future<bool> tssQuarantine(Reference<IDatabase> db, bool enable, UID tssId
}
if (enable) {
tr->set(tssQuarantineKeyFor(tssId), LiteralStringRef(""));
tr->set(tssQuarantineKeyFor(tssId), ""_sr);
// remove server from TSS mapping when quarantine is enabled
tssMapDB.erase(tr, ssi.tssPairID.get());
} else {
@ -112,19 +112,19 @@ namespace fdb_cli {
ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() == 2) {
if (tokens[1] != LiteralStringRef("list")) {
if (tokens[1] != "list"_sr) {
printUsage(tokens[0]);
return false;
} else {
wait(tssQuarantineList(db));
}
} else if (tokens.size() == 3) {
if ((tokens[1] != LiteralStringRef("start") && tokens[1] != LiteralStringRef("stop")) ||
(tokens[2].size() != 32) || !std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
if ((tokens[1] != "start"_sr && tokens[1] != "stop"_sr) || (tokens[2].size() != 32) ||
!std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
printUsage(tokens[0]);
return false;
} else {
bool enable = tokens[1] == LiteralStringRef("start");
bool enable = tokens[1] == "start"_sr;
UID tssId = UID::fromString(tokens[2].toString());
bool success = wait(tssQuarantine(db, enable, tssId));
return success;

View File

@ -74,17 +74,15 @@ void addInterfacesFromKVs(RangeResult& kvs,
return;
}
ClientLeaderRegInterface leaderInterf(workerInterf.address());
StringRef ip_port =
(kv.key.endsWith(LiteralStringRef(":tls")) ? kv.key.removeSuffix(LiteralStringRef(":tls")) : kv.key)
.removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
StringRef ip_port = (kv.key.endsWith(":tls"_sr) ? kv.key.removeSuffix(":tls"_sr) : kv.key)
.removePrefix("\xff\xff/worker_interfaces/"_sr);
(*address_interface)[ip_port] = std::make_pair(kv.value, leaderInterf);
if (workerInterf.reboot.getEndpoint().addresses.secondaryAddress.present()) {
Key full_ip_port2 =
StringRef(workerInterf.reboot.getEndpoint().addresses.secondaryAddress.get().toString());
StringRef ip_port2 = full_ip_port2.endsWith(LiteralStringRef(":tls"))
? full_ip_port2.removeSuffix(LiteralStringRef(":tls"))
: full_ip_port2;
StringRef ip_port2 =
full_ip_port2.endsWith(":tls"_sr) ? full_ip_port2.removeSuffix(":tls"_sr) : full_ip_port2;
(*address_interface)[ip_port2] = std::make_pair(kv.value, leaderInterf);
}
}
@ -99,8 +97,7 @@ ACTOR Future<Void> getWorkerInterfaces(Reference<ITransaction> tr,
}
// Hold the reference to the standalone's memory
state ThreadFuture<RangeResult> kvsFuture = tr->getRange(
KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY);
KeyRangeRef("\xff\xff/worker_interfaces/"_sr, "\xff\xff/worker_interfaces0"_sr), CLIENT_KNOBS->TOO_MANY);
state RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
ASSERT(!kvs.more);
if (verify) {
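
addInterfacesFromKVs and getWorkerInterfaces above normalize worker-interface keys by dropping an optional ":tls" suffix and then the "\xff\xff/worker_interfaces/" prefix, leaving a plain ip:port string. A sketch of that normalization with std::string (extractIpPort is a hypothetical name):

#include <string>

// "\xff\xff/worker_interfaces/1.2.3.4:4500:tls" -> "1.2.3.4:4500"
std::string extractIpPort(std::string key) {
	const std::string prefix = "\xff\xff/worker_interfaces/";
	const std::string tlsSuffix = ":tls";
	if (key.size() >= tlsSuffix.size() &&
	    key.compare(key.size() - tlsSuffix.size(), tlsSuffix.size(), tlsSuffix) == 0) {
		key.erase(key.size() - tlsSuffix.size());
	}
	if (key.compare(0, prefix.size(), prefix) == 0) {
		key.erase(0, prefix.size());
	}
	return key;
}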

View File

@ -32,7 +32,7 @@
namespace fdb_cli {
const KeyRef versionEpochSpecialKey = LiteralStringRef("\xff\xff/management/version_epoch");
const KeyRef versionEpochSpecialKey = "\xff\xff/management/version_epoch"_sr;
struct VersionInfo {
int64_t version;

View File

@ -537,10 +537,10 @@ void initHelp() {
CommandHelp("getversion",
"Fetch the current read version",
"Displays the current read version of the database or currently running transaction.");
helpMap["quota"] =
CommandHelp("quota",
"quota [get <tag> [reserved|total] [read|write]|set <tag> [reserved|total] [read|write] <value>]",
"Get or modify the throughput quota for the specified tag.");
helpMap["quota"] = CommandHelp("quota",
"quota [get <tag> [reserved_throughput|total_throughput] | set <tag> "
"[reserved_throughput|total_throughput] <value>]",
"Get or modify the throughput quota for the specified tag.");
helpMap["reset"] =
CommandHelp("reset",
"reset the current transaction",
@ -654,7 +654,7 @@ ACTOR Future<Void> checkStatus(Future<Void> f,
StatusObject _s = wait(StatusClient::statusFetcher(localDb));
s = _s;
} else {
state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
state ThreadFuture<Optional<Value>> statusValueF = tr->get("\xff\xff/status/json"_sr);
Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
if (!statusValue.present()) {
fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");
@ -698,7 +698,7 @@ ACTOR Future<bool> createSnapshot(Database db, std::vector<StringRef> tokens) {
for (int i = 1; i < tokens.size(); i++) {
snapCmd = snapCmd.withSuffix(tokens[i]);
if (i != tokens.size() - 1) {
snapCmd = snapCmd.withSuffix(LiteralStringRef(" "));
snapCmd = snapCmd.withSuffix(" "_sr);
}
}
try {
@ -1328,13 +1328,10 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise, Reference<ClusterCo
}
if (tokencmp(tokens[0], "fileconfigure")) {
if (tokens.size() == 2 || (tokens.size() == 3 && (tokens[1] == LiteralStringRef("new") ||
tokens[1] == LiteralStringRef("FORCE")))) {
bool _result =
wait(makeInterruptable(fileConfigureCommandActor(db,
tokens.back().toString(),
tokens[1] == LiteralStringRef("new"),
tokens[1] == LiteralStringRef("FORCE"))));
if (tokens.size() == 2 ||
(tokens.size() == 3 && (tokens[1] == "new"_sr || tokens[1] == "FORCE"_sr))) {
bool _result = wait(makeInterruptable(fileConfigureCommandActor(
db, tokens.back().toString(), tokens[1] == "new"_sr, tokens[1] == "FORCE"_sr)));
if (!_result)
is_error = true;
} else {
@ -1393,6 +1390,13 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise, Reference<ClusterCo
continue;
}
if (tokencmp(tokens[0], "blobkey")) {
bool _result = wait(makeInterruptable(blobKeyCommandActor(localDb, tenantEntry, tokens)));
if (!_result)
is_error = true;
continue;
}
if (tokencmp(tokens[0], "unlock")) {
if ((tokens.size() != 2) || (tokens[1].size() != 32) ||
!std::all_of(tokens[1].begin(), tokens[1].end(), &isxdigit)) {
@ -1881,48 +1885,32 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise, Reference<ClusterCo
continue;
}
if (tokencmp(tokens[0], "createtenant")) {
bool _result = wait(makeInterruptable(createTenantCommandActor(db, tokens)));
if (!_result)
if (tokencmp(tokens[0], "tenant")) {
bool _result = wait(makeInterruptable(tenantCommand(db, tokens)));
if (!_result) {
is_error = true;
continue;
}
if (tokencmp(tokens[0], "deletetenant")) {
bool _result = wait(makeInterruptable(deleteTenantCommandActor(db, tokens)));
if (!_result)
is_error = true;
else if (tenantName.present() && tokens[1] == tenantName.get()) {
} else if (tokens.size() >= 3 && tenantName.present() && tokencmp(tokens[1], "delete") &&
tokens[2] == tenantName.get()) {
printAtCol("WARNING: the active tenant was deleted. Use the `usetenant' or `defaulttenant' "
"command to choose a new tenant.\n",
80);
}
continue;
}
if (tokencmp(tokens[0], "listtenants")) {
bool _result = wait(makeInterruptable(listTenantsCommandActor(db, tokens)));
if (!_result)
if (tokencmp(tokens[0], "createtenant") || tokencmp(tokens[0], "deletetenant") ||
tokencmp(tokens[0], "listtenants") || tokencmp(tokens[0], "gettenant") ||
tokencmp(tokens[0], "configuretenant") || tokencmp(tokens[0], "renametenant")) {
bool _result = wait(makeInterruptable(tenantCommandForwarder(db, tokens)));
if (!_result) {
is_error = true;
}
continue;
}
if (tokencmp(tokens[0], "gettenant")) {
bool _result = wait(makeInterruptable(getTenantCommandActor(db, tokens)));
if (!_result)
is_error = true;
continue;
}
if (tokencmp(tokens[0], "configuretenant")) {
bool _result = wait(makeInterruptable(configureTenantCommandActor(db, tokens)));
if (!_result)
is_error = true;
continue;
}
if (tokencmp(tokens[0], "renametenant")) {
bool _result = wait(makeInterruptable(renameTenantCommandActor(db, tokens)));
if (tokencmp(tokens[0], "tenantgroup")) {
bool _result = wait(makeInterruptable(tenantGroupCommand(db, tokens)));
if (!_result)
is_error = true;
continue;
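The block above collapses the six per-command tenant handlers into the new tenant and tenantgroup entry points, with tenantCommandForwarder keeping the legacy spellings working. A hypothetical sketch of that forwarding step; the command mapping is inferred from the list above, and the function shape is an assumption, not the actual implementation:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Rewrite a legacy one-word command ("createtenant t1") onto the new
// subcommand form ("tenant create t1") before normal dispatch.
std::vector<std::string> forwardLegacyTenantCommand(std::vector<std::string> tokens) {
    static const std::map<std::string, std::string> verbFor = {
        { "createtenant", "create" },       { "deletetenant", "delete" },
        { "listtenants", "list" },          { "gettenant", "get" },
        { "configuretenant", "configure" }, { "renametenant", "rename" },
    };
    if (tokens.empty())
        return tokens;
    auto it = verbFor.find(tokens[0]);
    if (it == verbFor.end())
        return tokens; // not a legacy spelling; dispatch unchanged
    tokens[0] = it->second;
    tokens.insert(tokens.begin(), "tenant");
    return tokens;
}

int main() {
    for (const auto& t : forwardLegacyTenantCommand({ "createtenant", "t1" }))
        std::cout << t << ' '; // tenant create t1
    std::cout << std::endl;
    return 0;
}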
@ -2072,9 +2060,7 @@ const char* checkTlsConfigAgainstCoordAddrs(const ClusterConnectionString& ccs)
tlsAddrs++;
totalAddrs++;
}
if (tlsConfigured && tlsAddrs == 0) {
return "fdbcli is configured with TLS, but none of the coordinators have TLS addresses.";
} else if (!tlsConfigured && tlsAddrs == totalAddrs) {
if (!tlsConfigured && tlsAddrs == totalAddrs) {
return "fdbcli is not configured with TLS, but all of the coordinators have TLS addresses.";
} else {
return nullptr;

View File

@ -120,7 +120,7 @@ extern const KeyRef ignoreSSFailureSpecialKey;
extern const KeyRangeRef processClassSourceSpecialKeyRange;
extern const KeyRangeRef processClassTypeSpecialKeyRange;
// Other special keys
inline const KeyRef errorMsgSpecialKey = LiteralStringRef("\xff\xff/error_message");
inline const KeyRef errorMsgSpecialKey = "\xff\xff/error_message"_sr;
inline const KeyRef workerInterfacesVerifyOptionSpecialKey = "\xff\xff/management/options/worker_interfaces/verify"_sr;
// help functions (Copied from fdbcli.actor.cpp)
@ -160,8 +160,6 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
std::vector<StringRef> tokens,
LineNoise* linenoise,
Future<Void> warn);
// configuretenant command
ACTOR Future<bool> configureTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// consistency command
ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
std::vector<StringRef> tokens,
@ -170,12 +168,8 @@ ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
ACTOR Future<bool> consistencyScanCommandActor(Database localDb, std::vector<StringRef> tokens);
// coordinators command
ACTOR Future<bool> coordinatorsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// createtenant command
ACTOR Future<bool> createTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// datadistribution command
ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// deletetenant command
ACTOR Future<bool> deleteTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// exclude command
ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens, Future<Void> warn);
// expensive_data_check command
@ -191,8 +185,6 @@ ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
bool force);
// force_recovery_with_data_loss command
ACTOR Future<bool> forceRecoveryWithDataLossCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// gettenant command
ACTOR Future<bool> getTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// include command
ACTOR Future<bool> includeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// kill command
@ -200,8 +192,6 @@ ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
Reference<ITransaction> tr,
std::vector<StringRef> tokens,
std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// listtenants command
ACTOR Future<bool> listTenantsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// lock/unlock command
ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
ACTOR Future<bool> unlockDatabaseActor(Reference<IDatabase> db, UID uid);
@ -218,6 +208,11 @@ ACTOR Future<bool> changeFeedCommandActor(Database localDb,
ACTOR Future<bool> blobRangeCommandActor(Database localDb,
Optional<TenantMapEntry> tenantEntry,
std::vector<StringRef> tokens);
// blobkey command
ACTOR Future<bool> blobKeyCommandActor(Database localDb,
Optional<TenantMapEntry> tenantEntry,
std::vector<StringRef> tokens);
// maintenance command
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning = false);
ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,
@ -229,8 +224,6 @@ ACTOR Future<bool> profileCommandActor(Database db,
Reference<ITransaction> tr,
std::vector<StringRef> tokens,
bool intrans);
// renametenant command
ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// quota command
ACTOR Future<bool> quotaCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// setclass command
@ -247,6 +240,12 @@ ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
Reference<ITransaction> tr,
std::vector<StringRef> tokens,
std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// tenant command
Future<bool> tenantCommand(Reference<IDatabase> db, std::vector<StringRef> tokens);
// tenant command compatibility layer
Future<bool> tenantCommandForwarder(Reference<IDatabase> db, std::vector<StringRef> tokens);
// tenantgroup command
Future<bool> tenantGroupCommand(Reference<IDatabase> db, std::vector<StringRef> tokens);
// throttle command
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// triggerteaminfolog command

View File

@ -593,38 +593,105 @@ def triggerddteaminfolog(logger):
output = run_fdbcli_command('triggerddteaminfolog')
assert output == 'Triggered team info logging in data distribution.'
def setup_tenants(tenants):
command = '; '.join(['tenant create %s' % t for t in tenants])
run_fdbcli_command(command)
def clear_database_and_tenants():
run_fdbcli_command('writemode on; option on SPECIAL_KEY_SPACE_ENABLE_WRITES; clearrange "" \\xff; clearrange \\xff\\xff/management/tenant/map/ \\xff\\xff/management/tenant/map0')
def run_tenant_test(test_func):
test_func()
clear_database_and_tenants()
@enable_logging()
def tenants(logger):
output = run_fdbcli_command('listtenants')
assert output == 'The cluster has no tenants'
def tenant_create(logger):
output1 = run_fdbcli_command('tenant create tenant')
assert output1 == 'The tenant `tenant\' has been created'
output = run_fdbcli_command('createtenant tenant')
assert output == 'The tenant `tenant\' has been created'
output = run_fdbcli_command('createtenant tenant2 tenant_group=tenant_group2')
output = run_fdbcli_command('tenant create tenant2 tenant_group=tenant_group2')
assert output == 'The tenant `tenant2\' has been created'
output = run_fdbcli_command('listtenants')
output = run_fdbcli_command_and_get_error('tenant create tenant')
assert output == 'ERROR: A tenant with the given name already exists (2132)'
@enable_logging()
def tenant_delete(logger):
setup_tenants(['tenant', 'tenant2'])
run_fdbcli_command('writemode on; usetenant tenant2; set tenant_test value')
# delete a tenant while the fdbcli is using that tenant
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
cmd_sequence = ['writemode on', 'usetenant tenant', 'tenant delete tenant', 'get tenant_test', 'defaulttenant', 'usetenant tenant']
output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())
lines = output.decode().strip().split('\n')[-6:]
error_lines = error_output.decode().strip().split('\n')[-2:]
assert lines[0] == 'Using tenant `tenant\''
assert lines[1] == 'The tenant `tenant\' has been deleted'
assert lines[2] == 'WARNING: the active tenant was deleted. Use the `usetenant\' or `defaulttenant\''
assert lines[3] == 'command to choose a new tenant.'
assert error_lines[0] == 'ERROR: Tenant does not exist (2131)'
assert lines[5] == 'Using the default tenant'
assert error_lines[1] == 'ERROR: Tenant `tenant\' does not exist'
# delete a non-empty tenant
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
cmd_sequence = ['writemode on', 'tenant delete tenant2', 'usetenant tenant2', 'clear tenant_test', 'defaulttenant', 'tenant delete tenant2']
output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())
lines = output.decode().strip().split('\n')[-4:]
error_lines = error_output.decode().strip().split('\n')[-1:]
assert error_lines[0] == 'ERROR: Cannot delete a non-empty tenant (2133)'
assert lines[0] == 'Using tenant `tenant2\''
assert lines[1].startswith('Committed')
assert lines[2] == 'Using the default tenant'
assert lines[3] == 'The tenant `tenant2\' has been deleted'
# delete a non-existing tenant
output = run_fdbcli_command_and_get_error('tenant delete tenant')
assert output == 'ERROR: Tenant does not exist (2131)'
@enable_logging()
def tenant_list(logger):
output = run_fdbcli_command('tenant list')
assert output == 'The cluster has no tenants'
setup_tenants(['tenant', 'tenant2'])
output = run_fdbcli_command('tenant list')
assert output == '1. tenant\n 2. tenant2'
output = run_fdbcli_command('listtenants a z 1')
output = run_fdbcli_command('tenant list a z 1')
assert output == '1. tenant'
output = run_fdbcli_command('listtenants a tenant2')
output = run_fdbcli_command('tenant list a tenant2')
assert output == '1. tenant'
output = run_fdbcli_command('listtenants tenant2 z')
output = run_fdbcli_command('tenant list tenant2 z')
assert output == '1. tenant2'
output = run_fdbcli_command('gettenant tenant')
output = run_fdbcli_command('tenant list a b')
assert output == 'The cluster has no tenants in the specified range'
output = run_fdbcli_command_and_get_error('tenant list b a')
assert output == 'ERROR: end must be larger than begin'
output = run_fdbcli_command_and_get_error('tenant list a b 12x')
assert output == 'ERROR: invalid limit `12x\''
@enable_logging()
def tenant_get(logger):
setup_tenants(['tenant', 'tenant2 tenant_group=tenant_group2'])
output = run_fdbcli_command('tenant get tenant')
lines = output.split('\n')
assert len(lines) == 3
assert lines[0].strip().startswith('id: ')
assert lines[1].strip().startswith('prefix: ')
assert lines[2].strip() == 'tenant state: ready'
output = run_fdbcli_command('gettenant tenant JSON')
output = run_fdbcli_command('tenant get tenant JSON')
json_output = json.loads(output, strict=False)
assert(len(json_output) == 2)
assert('tenant' in json_output)
@ -638,7 +705,7 @@ def tenants(logger):
assert('printable' in json_output['tenant']['prefix'])
assert(json_output['tenant']['tenant_state'] == 'ready')
output = run_fdbcli_command('gettenant tenant2')
output = run_fdbcli_command('tenant get tenant2')
lines = output.split('\n')
assert len(lines) == 4
assert lines[0].strip().startswith('id: ')
@ -646,7 +713,7 @@ def tenants(logger):
assert lines[2].strip() == 'tenant state: ready'
assert lines[3].strip() == 'tenant group: tenant_group2'
output = run_fdbcli_command('gettenant tenant2 JSON')
output = run_fdbcli_command('tenant get tenant2 JSON')
json_output = json.loads(output, strict=False)
assert(len(json_output) == 2)
assert('tenant' in json_output)
@ -661,36 +728,57 @@ def tenants(logger):
assert('base64' in json_output['tenant']['tenant_group'])
assert(json_output['tenant']['tenant_group']['printable'] == 'tenant_group2')
output = run_fdbcli_command('configuretenant tenant tenant_group=tenant_group1')
@enable_logging()
def tenant_configure(logger):
setup_tenants(['tenant'])
output = run_fdbcli_command('tenant configure tenant tenant_group=tenant_group1')
assert output == 'The configuration for tenant `tenant\' has been updated'
output = run_fdbcli_command('gettenant tenant')
output = run_fdbcli_command('tenant get tenant')
lines = output.split('\n')
assert len(lines) == 4
assert lines[3].strip() == 'tenant group: tenant_group1'
output = run_fdbcli_command('configuretenant tenant unset tenant_group')
output = run_fdbcli_command('tenant configure tenant unset tenant_group')
assert output == 'The configuration for tenant `tenant\' has been updated'
output = run_fdbcli_command('gettenant tenant')
output = run_fdbcli_command('tenant get tenant')
lines = output.split('\n')
assert len(lines) == 3
output = run_fdbcli_command_and_get_error('configuretenant tenant tenant_group=tenant_group1 tenant_group=tenant_group2')
output = run_fdbcli_command_and_get_error('tenant configure tenant tenant_group=tenant_group1 tenant_group=tenant_group2')
assert output == 'ERROR: configuration parameter `tenant_group\' specified more than once.'
output = run_fdbcli_command_and_get_error('configuretenant tenant unset')
output = run_fdbcli_command_and_get_error('tenant configure tenant unset')
assert output == 'ERROR: `unset\' specified without a configuration parameter.'
output = run_fdbcli_command_and_get_error('configuretenant tenant unset tenant_group=tenant_group1')
output = run_fdbcli_command_and_get_error('tenant configure tenant unset tenant_group=tenant_group1')
assert output == 'ERROR: unrecognized configuration parameter `tenant_group=tenant_group1\'.'
output = run_fdbcli_command_and_get_error('configuretenant tenant tenant_group')
output = run_fdbcli_command_and_get_error('tenant configure tenant tenant_group')
assert output == 'ERROR: invalid configuration string `tenant_group\'. String must specify a value using `=\'.'
output = run_fdbcli_command_and_get_error('configuretenant tenant3 tenant_group=tenant_group1')
output = run_fdbcli_command_and_get_error('tenant configure tenant3 tenant_group=tenant_group1')
assert output == 'ERROR: Tenant does not exist (2131)'
@enable_logging()
def tenant_rename(logger):
setup_tenants(['tenant', 'tenant2'])
output = run_fdbcli_command('tenant rename tenant tenant3')
assert output == 'The tenant `tenant\' has been renamed to `tenant3\''
output = run_fdbcli_command_and_get_error('tenant rename tenant tenant4')
assert output == 'ERROR: Tenant does not exist (2131)'
output = run_fdbcli_command_and_get_error('tenant rename tenant2 tenant3')
assert output == 'ERROR: A tenant with the given name already exists (2132)'
@enable_logging()
def tenant_usetenant(logger):
setup_tenants(['tenant', 'tenant2'])
output = run_fdbcli_command('usetenant')
assert output == 'Using the default tenant'
@ -722,44 +810,103 @@ def tenants(logger):
assert lines[3] == '`tenant_test\' is `tenant2\''
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
cmd_sequence = ['usetenant tenant', 'get tenant_test', 'defaulttenant', 'get tenant_test']
cmd_sequence = ['usetenant tenant', 'get tenant_test', 'usetenant tenant2', 'get tenant_test', 'defaulttenant', 'get tenant_test']
output, _ = process.communicate(input='\n'.join(cmd_sequence).encode())
lines = output.decode().strip().split('\n')[-4:]
lines = output.decode().strip().split('\n')[-6:]
assert lines[0] == 'Using tenant `tenant\''
assert lines[1] == '`tenant_test\' is `tenant\''
assert lines[2] == 'Using the default tenant'
assert lines[3] == '`tenant_test\' is `default_tenant\''
assert lines[2] == 'Using tenant `tenant2\''
assert lines[3] == '`tenant_test\' is `tenant2\''
assert lines[4] == 'Using the default tenant'
assert lines[5] == '`tenant_test\' is `default_tenant\''
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
cmd_sequence = ['writemode on', 'usetenant tenant', 'clear tenant_test',
'deletetenant tenant', 'get tenant_test', 'defaulttenant', 'usetenant tenant']
output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())
@enable_logging()
def tenant_old_commands(logger):
create_output = run_fdbcli_command('tenant create tenant')
list_output = run_fdbcli_command('tenant list')
get_output = run_fdbcli_command('tenant get tenant')
# Run the gettenant command here because the ID will be different in the second block
get_output_old = run_fdbcli_command('gettenant tenant')
configure_output = run_fdbcli_command('tenant configure tenant tenant_group=tenant_group1')
rename_output = run_fdbcli_command('tenant rename tenant tenant2')
delete_output = run_fdbcli_command('tenant delete tenant2')
lines = output.decode().strip().split('\n')[-7:]
error_lines = error_output.decode().strip().split('\n')[-2:]
assert lines[0] == 'Using tenant `tenant\''
assert lines[1].startswith('Committed')
assert lines[2] == 'The tenant `tenant\' has been deleted'
assert lines[3] == 'WARNING: the active tenant was deleted. Use the `usetenant\' or `defaulttenant\''
assert lines[4] == 'command to choose a new tenant.'
assert error_lines[0] == 'ERROR: Tenant does not exist (2131)'
assert lines[6] == 'Using the default tenant'
assert error_lines[1] == 'ERROR: Tenant `tenant\' does not exist'
create_output_old = run_fdbcli_command('createtenant tenant')
list_output_old = run_fdbcli_command('listtenants')
configure_output_old = run_fdbcli_command('configuretenant tenant tenant_group=tenant_group1')
rename_output_old = run_fdbcli_command('renametenant tenant tenant2')
delete_output_old = run_fdbcli_command('deletetenant tenant2')
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
cmd_sequence = ['writemode on', 'deletetenant tenant2', 'usetenant tenant2', 'clear tenant_test', 'defaulttenant', 'deletetenant tenant2']
output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())
assert create_output == create_output_old
assert list_output == list_output_old
assert get_output == get_output_old
assert configure_output == configure_output_old
assert rename_output == rename_output_old
assert delete_output == delete_output_old
lines = output.decode().strip().split('\n')[-4:]
error_lines = error_output.decode().strip().split('\n')[-1:]
assert error_lines[0] == 'ERROR: Cannot delete a non-empty tenant (2133)'
assert lines[0] == 'Using tenant `tenant2\''
assert lines[1].startswith('Committed')
assert lines[2] == 'Using the default tenant'
assert lines[3] == 'The tenant `tenant2\' has been deleted'
@enable_logging()
def tenant_group_list(logger):
output = run_fdbcli_command('tenantgroup list')
assert output == 'The cluster has no tenant groups'
run_fdbcli_command('writemode on; clear tenant_test')
setup_tenants(['tenant', 'tenant2 tenant_group=tenant_group2', 'tenant3 tenant_group=tenant_group3'])
output = run_fdbcli_command('tenantgroup list')
assert output == '1. tenant_group2\n 2. tenant_group3'
output = run_fdbcli_command('tenantgroup list a z 1')
assert output == '1. tenant_group2'
output = run_fdbcli_command('tenantgroup list a tenant_group3')
assert output == '1. tenant_group2'
output = run_fdbcli_command('tenantgroup list tenant_group3 z')
assert output == '1. tenant_group3'
output = run_fdbcli_command('tenantgroup list a b')
assert output == 'The cluster has no tenant groups in the specified range'
output = run_fdbcli_command_and_get_error('tenantgroup list b a')
assert output == 'ERROR: end must be larger than begin'
output = run_fdbcli_command_and_get_error('tenantgroup list a b 12x')
assert output == 'ERROR: invalid limit `12x\''
@enable_logging()
def tenant_group_get(logger):
setup_tenants(['tenant tenant_group=tenant_group'])
output = run_fdbcli_command('tenantgroup get tenant_group')
assert output == 'The tenant group is present in the cluster'
output = run_fdbcli_command('tenantgroup get tenant_group JSON')
json_output = json.loads(output, strict=False)
assert(len(json_output) == 2)
assert('tenant_group' in json_output)
assert(json_output['type'] == 'success')
assert(len(json_output['tenant_group']) == 0)
output = run_fdbcli_command_and_get_error('tenantgroup get tenant_group2')
assert output == 'ERROR: tenant group not found'
output = run_fdbcli_command('tenantgroup get tenant_group2 JSON')
json_output = json.loads(output, strict=False)
assert(len(json_output) == 2)
assert(json_output['type'] == 'error')
assert(json_output['error'] == 'tenant group not found')
def tenants():
run_tenant_test(tenant_create)
run_tenant_test(tenant_delete)
run_tenant_test(tenant_list)
run_tenant_test(tenant_get)
run_tenant_test(tenant_configure)
run_tenant_test(tenant_rename)
run_tenant_test(tenant_usetenant)
run_tenant_test(tenant_old_commands)
run_tenant_test(tenant_group_list)
run_tenant_test(tenant_group_get)
def integer_options():
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
@ -772,66 +919,22 @@ def integer_options():
assert error_output == b''
def tls_address_suffix():
# fdbcli shall prevent a non-TLS fdbcli run from connecting to an all-TLS cluster, and vice versa
# fdbcli shall prevent a non-TLS fdbcli run from connecting to an all-TLS cluster
preamble = 'eNW1yf1M:eNW1yf1M@'
def make_addr(port: int, tls: bool = False):
return "127.0.0.1:{}{}".format(port, ":tls" if tls else "")
testcases = [
# IsServerTLS, NumServerAddrs
(True, 1),
(False, 1),
(True, 3),
(False, 3),
]
err_output_server_no_tls = "ERROR: fdbcli is configured with TLS, but none of the coordinators have TLS addresses."
num_server_addrs = [1, 2, 5]
err_output_server_tls = "ERROR: fdbcli is not configured with TLS, but all of the coordinators have TLS addresses."
# technically the contents of the certs and key files are not evaluated
# before the TLS-suffix check against the TLS configuration takes place,
# but we generate the certs and keys anyway to avoid
# imposing a nuanced TLSConfig evaluation-ordering requirement on the testcase
with tempfile.TemporaryDirectory() as tmpdir:
cert_file = tmpdir + "/client-cert.pem"
key_file = tmpdir + "/client-key.pem"
ca_file = tmpdir + "/server-ca.pem"
mkcert_process = subprocess.run([
args.build_dir + "/bin/mkcert",
"--server-chain-length", "1",
"--client-chain-length", "1",
"--server-cert-file", tmpdir + "/server-cert.pem",
"--client-cert-file", tmpdir + "/client-cert.pem",
"--server-key-file", tmpdir + "/server-key.pem",
"--client-key-file", tmpdir + "/client-key.pem",
"--server-ca-file", tmpdir + "/server-ca.pem",
"--client-ca-file", tmpdir + "/client-ca.pem",
],
capture_output=True)
if mkcert_process.returncode != 0:
print("mkcert returned with code {}".format(mkcert_process.returncode))
print("Output:\n{}{}\n".format(
mkcert_process.stdout.decode("utf8").strip(),
mkcert_process.stderr.decode("utf8").strip()))
assert False
cluster_fn = tmpdir + "/fdb.cluster"
for testcase in testcases:
is_server_tls, num_server_addrs = testcase
for num_server_addr in num_server_addrs:
with open(cluster_fn, "w") as fp:
fp.write(preamble + ",".join(
[make_addr(port=4000 + addr_idx, tls=is_server_tls) for addr_idx in range(num_server_addrs)]))
["127.0.0.1:{}:tls".format(4000 + addr_idx) for addr_idx in range(num_server_addr)]))
fp.close()
tls_args = ["--tls-certificate-file",
cert_file,
"--tls-key-file",
key_file,
"--tls-ca-file",
ca_file] if not is_server_tls else []
fdbcli_process = subprocess.run(command_template[:2] + [cluster_fn] + tls_args, capture_output=True)
fdbcli_process = subprocess.run(command_template[:2] + [cluster_fn], capture_output=True)
assert fdbcli_process.returncode != 0
err_out = fdbcli_process.stderr.decode("utf8").strip()
if is_server_tls:
assert err_out == err_output_server_tls, f"unexpected output: {err_out}"
else:
assert err_out == err_output_server_no_tls, f"unexpected output: {err_out}"
assert err_out == err_output_server_tls, f"unexpected output: {err_out}"
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,

View File

@ -35,6 +35,8 @@ TEST_CASE("/Atomic/DoAppendIfFits") {
{
Value existingValue = makeString(CLIENT_KNOBS->VALUE_SIZE_LIMIT - 1, arena);
Value otherOperand = makeString(2, arena);
deterministicRandom()->randomBytes(mutateString(existingValue), existingValue.size());
deterministicRandom()->randomBytes(mutateString(otherOperand), otherOperand.size());
// Appended values cannot fit in the result; should return existingValue
auto result = doAppendIfFits(existingValue, otherOperand, arena);
ASSERT(compare(existingValue, result) == 0);
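The new randomBytes calls make this case exercise non-trivial payloads. The behavior the assertion pins down, sketched without flow's Arena machinery (the size limit value here is an illustrative assumption, standing in for CLIENT_KNOBS->VALUE_SIZE_LIMIT):

#include <cstddef>
#include <iostream>
#include <string>

constexpr size_t VALUE_SIZE_LIMIT = 100000; // illustrative stand-in for the knob

// Append otherOperand to existingValue unless the result would exceed the
// value size limit, in which case the existing value is returned unchanged.
std::string doAppendIfFits(const std::string& existingValue, const std::string& otherOperand) {
    if (existingValue.size() + otherOperand.size() > VALUE_SIZE_LIMIT)
        return existingValue;
    return existingValue + otherOperand;
}

int main() {
    std::string big(VALUE_SIZE_LIMIT - 1, 'x');
    // limit - 1 plus 2 bytes cannot fit, so the original value comes back
    std::cout << (doAppendIfFits(big, "ab") == big) << std::endl; // prints 1
    return 0;
}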

View File

@ -22,6 +22,10 @@
#include <time.h>
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/BlobCipher.h"
#include "fdbclient/GetEncryptCipherKeys.actor.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbclient/Metacluster.h"
#include "fdbrpc/simulator.h"
#include "flow/ActorCollection.h"
#include "flow/actorcompiler.h" // has to be last include
@ -253,16 +257,18 @@ std::pair<Version, uint32_t> decodeBKMutationLogKey(Key key) {
bigEndian32(*(int32_t*)(key.begin() + backupLogPrefixBytes + sizeof(UID) + sizeof(uint8_t) + sizeof(int64_t))));
}
void decodeBackupLogValue(Arena& arena,
VectorRef<MutationRef>& result,
int& mutationSize,
StringRef value,
StringRef addPrefix,
StringRef removePrefix,
Version version,
Reference<KeyRangeMap<Version>> key_version) {
ACTOR static Future<Void> decodeBackupLogValue(Arena* arena,
VectorRef<MutationRef>* result,
VectorRef<Optional<MutationRef>>* encryptedResult,
int* mutationSize,
Standalone<StringRef> value,
Key addPrefix,
Key removePrefix,
Version version,
Reference<KeyRangeMap<Version>> key_version,
Database cx) {
try {
uint64_t offset(0);
state uint64_t offset(0);
uint64_t protocolVersion = 0;
memcpy(&protocolVersion, value.begin(), sizeof(uint64_t));
offset += sizeof(uint64_t);
@ -274,36 +280,48 @@ void decodeBackupLogValue(Arena& arena,
throw incompatible_protocol_version();
}
uint32_t totalBytes = 0;
state uint32_t totalBytes = 0;
memcpy(&totalBytes, value.begin() + offset, sizeof(uint32_t));
offset += sizeof(uint32_t);
uint32_t consumed = 0;
state uint32_t consumed = 0;
if (totalBytes + offset > value.size())
throw restore_missing_data();
int originalOffset = offset;
state int originalOffset = offset;
while (consumed < totalBytes) {
uint32_t type = 0;
memcpy(&type, value.begin() + offset, sizeof(uint32_t));
offset += sizeof(uint32_t);
uint32_t len1 = 0;
state uint32_t len1 = 0;
memcpy(&len1, value.begin() + offset, sizeof(uint32_t));
offset += sizeof(uint32_t);
uint32_t len2 = 0;
state uint32_t len2 = 0;
memcpy(&len2, value.begin() + offset, sizeof(uint32_t));
offset += sizeof(uint32_t);
ASSERT(offset + len1 + len2 <= value.size() && isValidMutationType(type));
MutationRef logValue;
Arena tempArena;
state MutationRef logValue;
state Arena tempArena;
logValue.type = type;
logValue.param1 = value.substr(offset, len1);
offset += len1;
logValue.param2 = value.substr(offset, len2);
offset += len2;
state Optional<MutationRef> encryptedLogValue = Optional<MutationRef>();
// Decrypt mutation ref if encrypted
if (logValue.isEncrypted()) {
encryptedLogValue = logValue;
Reference<AsyncVar<ClientDBInfo> const> dbInfo = cx->clientInfo;
TextAndHeaderCipherKeys cipherKeys =
wait(getEncryptCipherKeys(dbInfo, *logValue.encryptionHeader(), BlobCipherMetrics::BACKUP));
logValue = logValue.decrypt(cipherKeys, tempArena, BlobCipherMetrics::BACKUP);
}
ASSERT(!logValue.isEncrypted());
MutationRef originalLogValue = logValue;
if (logValue.type == MutationRef::ClearRange) {
KeyRangeRef range(logValue.param1, logValue.param2);
@ -311,7 +329,7 @@ void decodeBackupLogValue(Arena& arena,
for (auto r : ranges) {
if (version > r.value() && r.value() != invalidVersion) {
KeyRef minKey = std::min(r.range().end, range.end);
if (minKey == (removePrefix == StringRef() ? normalKeys.end : strinc(removePrefix))) {
if (minKey == (removePrefix == StringRef() ? allKeys.end : strinc(removePrefix))) {
logValue.param1 = std::max(r.range().begin, range.begin);
if (removePrefix.size()) {
logValue.param1 = logValue.param1.removePrefix(removePrefix);
@ -319,9 +337,9 @@ void decodeBackupLogValue(Arena& arena,
if (addPrefix.size()) {
logValue.param1 = logValue.param1.withPrefix(addPrefix, tempArena);
}
logValue.param2 = addPrefix == StringRef() ? normalKeys.end : strinc(addPrefix, tempArena);
result.push_back_deep(arena, logValue);
mutationSize += logValue.expectedSize();
logValue.param2 = addPrefix == StringRef() ? allKeys.end : strinc(addPrefix, tempArena);
result->push_back_deep(*arena, logValue);
*mutationSize += logValue.expectedSize();
} else {
logValue.param1 = std::max(r.range().begin, range.begin);
logValue.param2 = minKey;
@ -333,8 +351,13 @@ void decodeBackupLogValue(Arena& arena,
logValue.param1 = logValue.param1.withPrefix(addPrefix, tempArena);
logValue.param2 = logValue.param2.withPrefix(addPrefix, tempArena);
}
result.push_back_deep(arena, logValue);
mutationSize += logValue.expectedSize();
result->push_back_deep(*arena, logValue);
*mutationSize += logValue.expectedSize();
}
if (originalLogValue.param1 == logValue.param1 && originalLogValue.param2 == logValue.param2) {
encryptedResult->push_back_deep(*arena, encryptedLogValue);
} else {
encryptedResult->push_back_deep(*arena, Optional<MutationRef>());
}
}
}
@ -348,8 +371,15 @@ void decodeBackupLogValue(Arena& arena,
if (addPrefix.size()) {
logValue.param1 = logValue.param1.withPrefix(addPrefix, tempArena);
}
result.push_back_deep(arena, logValue);
mutationSize += logValue.expectedSize();
result->push_back_deep(*arena, logValue);
*mutationSize += logValue.expectedSize();
// If we did not remove/add prefixes to the mutation, keep the original encrypted mutation so we
// do not have to re-encrypt it unnecessarily
if (originalLogValue.param1 == logValue.param1 && originalLogValue.param2 == logValue.param2) {
encryptedResult->push_back_deep(*arena, encryptedLogValue);
} else {
encryptedResult->push_back_deep(*arena, Optional<MutationRef>());
}
}
}
@ -374,6 +404,7 @@ void decodeBackupLogValue(Arena& arena,
.detail("Value", value);
throw;
}
return Void();
}
static double lastErrorTime = 0;
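For reference, the memcpy-driven loop in the actor above walks a length-prefixed layout: a uint64 protocol version and a uint32 total byte count, followed by repeated (type, len1, len2, param1, param2) records. A standalone sketch of just that framing, assuming host-endian fields as the memcpy reads imply; decryption, prefix rewriting, and version filtering are omitted:

#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <string_view>
#include <vector>

struct DecodedRecord {
    uint32_t type;
    std::string_view param1, param2;
};

std::vector<DecodedRecord> decodeFrames(std::string_view value) {
    if (value.size() < sizeof(uint64_t) + sizeof(uint32_t))
        throw std::runtime_error("restore_missing_data");
    size_t offset = 0;
    auto read32 = [&](uint32_t& out) {
        std::memcpy(&out, value.data() + offset, sizeof(uint32_t));
        offset += sizeof(uint32_t);
    };
    uint64_t protocolVersion = 0; // the real decoder rejects unsupported versions
    std::memcpy(&protocolVersion, value.data(), sizeof(uint64_t));
    offset += sizeof(uint64_t);
    uint32_t totalBytes = 0;
    read32(totalBytes);
    if (totalBytes + offset > value.size())
        throw std::runtime_error("restore_missing_data");
    std::vector<DecodedRecord> result;
    const size_t originalOffset = offset;
    while (offset - originalOffset < totalBytes) {
        uint32_t type = 0, len1 = 0, len2 = 0;
        if (offset + 3 * sizeof(uint32_t) > value.size())
            throw std::runtime_error("restore_corrupted_data");
        read32(type);
        read32(len1);
        read32(len2);
        if (offset + len1 + len2 > value.size())
            throw std::runtime_error("restore_corrupted_data");
        result.push_back({ type, value.substr(offset, len1), value.substr(offset + len1, len2) });
        offset += static_cast<size_t>(len1) + len2;
    }
    return result;
}

The new encryptedResult vector rides alongside result: a record keeps its original, still-encrypted mutation only when prefix rewriting left both params untouched, so unchanged mutations never need re-encryption downstream.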
@ -614,21 +645,24 @@ ACTOR Future<int> dumpData(Database cx,
state int mutationSize = 0;
loop {
try {
RCGroup group = waitNext(results.getFuture());
state RCGroup group = waitNext(results.getFuture());
lock->release(group.items.expectedSize());
BinaryWriter bw(Unversioned());
for (int i = 0; i < group.items.size(); ++i) {
bw.serializeBytes(group.items[i].value);
}
decodeBackupLogValue(req.arena,
req.transaction.mutations,
mutationSize,
bw.toValue(),
addPrefix,
removePrefix,
group.groupKey,
keyVersion);
Standalone<StringRef> value = bw.toValue();
wait(decodeBackupLogValue(&req.arena,
&req.transaction.mutations,
&req.transaction.encryptedMutations,
&mutationSize,
value,
addPrefix,
removePrefix,
group.groupKey,
keyVersion,
cx));
newBeginVersion = group.groupKey + 1;
if (mutationSize >= CLIENT_KNOBS->BACKUP_LOG_WRITE_BATCH_MAX_SIZE) {
break;
@ -652,8 +686,10 @@ ACTOR Future<int> dumpData(Database cx,
Key rangeEnd = getApplyKey(newBeginVersion, uid);
req.transaction.mutations.push_back_deep(req.arena, MutationRef(MutationRef::SetValue, applyBegin, versionKey));
req.transaction.encryptedMutations.push_back_deep(req.arena, Optional<MutationRef>());
req.transaction.write_conflict_ranges.push_back_deep(req.arena, singleKeyRange(applyBegin));
req.transaction.mutations.push_back_deep(req.arena, MutationRef(MutationRef::ClearRange, rangeBegin, rangeEnd));
req.transaction.encryptedMutations.push_back_deep(req.arena, Optional<MutationRef>());
req.transaction.write_conflict_ranges.push_back_deep(req.arena, singleKeyRange(rangeBegin));
// The commit request contains no read conflict ranges, so regardless of what read version we
@ -968,10 +1004,9 @@ ACTOR Future<Void> cleanupLogMutations(Database cx, Value destUidValue, bool del
.get(BackupAgentBase::keySourceStates)
.get(currLogUid)
.pack(DatabaseBackupAgent::keyStateStatus));
state Future<Optional<Value>> foundBackupKey =
tr->get(Subspace(currLogUid.withPrefix(LiteralStringRef("uid->config/"))
.withPrefix(fileBackupPrefixRange.begin))
.pack(LiteralStringRef("stateEnum")));
state Future<Optional<Value>> foundBackupKey = tr->get(
Subspace(currLogUid.withPrefix("uid->config/"_sr).withPrefix(fileBackupPrefixRange.begin))
.pack("stateEnum"_sr));
wait(success(foundDRKey) && success(foundBackupKey));
if (foundDRKey.get().present() && foundBackupKey.get().present()) {
@ -1165,3 +1200,38 @@ Standalone<StringRef> BackupAgentBase::getCurrentTime() {
}
std::string const BackupAgentBase::defaultTagName = "default";
void addDefaultBackupRanges(Standalone<VectorRef<KeyRangeRef>>& backupKeys) {
backupKeys.push_back_deep(backupKeys.arena(), normalKeys);
for (auto& r : getSystemBackupRanges()) {
backupKeys.push_back_deep(backupKeys.arena(), r);
}
}
VectorRef<KeyRangeRef> const& getSystemBackupRanges() {
static Standalone<VectorRef<KeyRangeRef>> systemBackupRanges;
if (systemBackupRanges.empty()) {
systemBackupRanges.push_back_deep(systemBackupRanges.arena(), prefixRange(TenantMetadata::subspace()));
systemBackupRanges.push_back_deep(systemBackupRanges.arena(),
singleKeyRange(MetaclusterMetadata::metaclusterRegistration().key));
}
return systemBackupRanges;
}
KeyRangeMap<bool> const& systemBackupMutationMask() {
static KeyRangeMap<bool> mask;
if (mask.size() == 1) {
for (auto r : getSystemBackupRanges()) {
mask.insert(r, true);
}
}
return mask;
}
KeyRangeRef const& getDefaultBackupSharedRange() {
static KeyRangeRef defaultSharedRange(""_sr, ""_sr);
return defaultSharedRange;
}

View File

@ -906,6 +906,7 @@ public:
ACTOR static Future<Optional<RestorableFileSet>> getRestoreSet(Reference<BackupContainerFileSystem> bc,
Version targetVersion,
VectorRef<KeyRangeRef> keyRangesFilter,
Optional<Database> cx,
bool logsOnly = false,
Version beginVersion = invalidVersion) {
for (const auto& range : keyRangesFilter) {
@ -982,7 +983,7 @@ public:
restorable.ranges.end(),
[file = rit->first](const RangeFile f) { return f.fileName == file; });
ASSERT(it != restorable.ranges.end());
KeyRange result = wait(bc->getSnapshotFileKeyRange(*it));
KeyRange result = wait(bc->getSnapshotFileKeyRange(*it, cx));
ASSERT(rit->second.begin <= result.begin && rit->second.end >= result.end);
}
}
@ -1349,7 +1350,9 @@ Future<Void> BackupContainerFileSystem::expireData(Version expireEndVersion,
Reference<BackupContainerFileSystem>::addRef(this), expireEndVersion, force, progress, restorableBeginVersion);
}
ACTOR static Future<KeyRange> getSnapshotFileKeyRange_impl(Reference<BackupContainerFileSystem> bc, RangeFile file) {
ACTOR static Future<KeyRange> getSnapshotFileKeyRange_impl(Reference<BackupContainerFileSystem> bc,
RangeFile file,
Optional<Database> cx) {
state int readFileRetries = 0;
state bool beginKeySet = false;
state Key beginKey;
@ -1361,7 +1364,8 @@ ACTOR static Future<KeyRange> getSnapshotFileKeyRange_impl(Reference<BackupConta
state int64_t j = 0;
for (; j < file.fileSize; j += file.blockSize) {
int64_t len = std::min<int64_t>(file.blockSize, file.fileSize - j);
Standalone<VectorRef<KeyValueRef>> blockData = wait(fileBackup::decodeRangeFileBlock(inFile, j, len));
Standalone<VectorRef<KeyValueRef>> blockData =
wait(fileBackup::decodeRangeFileBlock(inFile, j, len, cx));
if (!beginKeySet) {
beginKey = blockData.front().key;
beginKeySet = true;
@ -1434,17 +1438,18 @@ ACTOR static Future<Optional<Version>> readVersionProperty(Reference<BackupConta
}
}
Future<KeyRange> BackupContainerFileSystem::getSnapshotFileKeyRange(const RangeFile& file) {
Future<KeyRange> BackupContainerFileSystem::getSnapshotFileKeyRange(const RangeFile& file, Optional<Database> cx) {
ASSERT(g_network->isSimulated());
return getSnapshotFileKeyRange_impl(Reference<BackupContainerFileSystem>::addRef(this), file);
return getSnapshotFileKeyRange_impl(Reference<BackupContainerFileSystem>::addRef(this), file, cx);
}
Future<Optional<RestorableFileSet>> BackupContainerFileSystem::getRestoreSet(Version targetVersion,
Optional<Database> cx,
VectorRef<KeyRangeRef> keyRangesFilter,
bool logsOnly,
Version beginVersion) {
return BackupContainerFileSystemImpl::getRestoreSet(
Reference<BackupContainerFileSystem>::addRef(this), targetVersion, keyRangesFilter, logsOnly, beginVersion);
Reference<BackupContainerFileSystem>::addRef(this), targetVersion, keyRangesFilter, cx, logsOnly, beginVersion);
}
Future<Optional<Version>> BackupContainerFileSystem::VersionProperty::get() {
@ -1666,7 +1671,8 @@ ACTOR static Future<Void> testWriteSnapshotFile(Reference<IBackupFile> file, Key
ACTOR Future<Void> testBackupContainer(std::string url,
Optional<std::string> proxy,
Optional<std::string> encryptionKeyFileName) {
Optional<std::string> encryptionKeyFileName,
Optional<Database> cx) {
state FlowLock lock(100e6);
if (encryptionKeyFileName.present()) {
@ -1697,7 +1703,7 @@ ACTOR Future<Void> testBackupContainer(std::string url,
// List of sizes to use to test edge cases on underlying file implementations
state std::vector<int> fileSizes = { 0 };
if (StringRef(url).startsWith(LiteralStringRef("blob"))) {
if (StringRef(url).startsWith("blob"_sr)) {
fileSizes.push_back(CLIENT_KNOBS->BLOBSTORE_MULTIPART_MIN_PART_SIZE);
fileSizes.push_back(CLIENT_KNOBS->BLOBSTORE_MULTIPART_MIN_PART_SIZE + 10);
}
@ -1705,8 +1711,8 @@ ACTOR Future<Void> testBackupContainer(std::string url,
loop {
state Version logStart = v;
state int kvfiles = deterministicRandom()->randomInt(0, 3);
state Key begin = LiteralStringRef("");
state Key end = LiteralStringRef("");
state Key begin = ""_sr;
state Key end = ""_sr;
state int blockSize = 3 * sizeof(uint32_t) + begin.size() + end.size() + 8;
while (kvfiles > 0) {
@ -1773,13 +1779,13 @@ ACTOR Future<Void> testBackupContainer(std::string url,
for (; i < listing.snapshots.size(); ++i) {
{
// Ensure we can still restore to the latest version
Optional<RestorableFileSet> rest = wait(c->getRestoreSet(desc.maxRestorableVersion.get()));
Optional<RestorableFileSet> rest = wait(c->getRestoreSet(desc.maxRestorableVersion.get(), cx));
ASSERT(rest.present());
}
{
// Ensure we can restore to the end version of snapshot i
Optional<RestorableFileSet> rest = wait(c->getRestoreSet(listing.snapshots[i].endVersion));
Optional<RestorableFileSet> rest = wait(c->getRestoreSet(listing.snapshots[i].endVersion, cx));
ASSERT(rest.present());
}
@ -1820,14 +1826,16 @@ ACTOR Future<Void> testBackupContainer(std::string url,
}
TEST_CASE("/backup/containers/localdir/unencrypted") {
wait(testBackupContainer(format("file://%s/fdb_backups/%llx", params.getDataDir().c_str(), timer_int()), {}, {}));
wait(testBackupContainer(
format("file://%s/fdb_backups/%llx", params.getDataDir().c_str(), timer_int()), {}, {}, {}));
return Void();
}
TEST_CASE("/backup/containers/localdir/encrypted") {
wait(testBackupContainer(format("file://%s/fdb_backups/%llx", params.getDataDir().c_str(), timer_int()),
{},
format("%s/test_encryption_key", params.getDataDir().c_str())));
format("%s/test_encryption_key", params.getDataDir().c_str()),
{}));
return Void();
}
@ -1835,7 +1843,7 @@ TEST_CASE("/backup/containers/url") {
if (!g_network->isSimulated()) {
const char* url = getenv("FDB_TEST_BACKUP_URL");
ASSERT(url != nullptr);
wait(testBackupContainer(url, {}, {}));
wait(testBackupContainer(url, {}, {}, {}));
}
return Void();
}
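getRestoreSet, getSnapshotFileKeyRange, decodeRangeFileBlock, and testBackupContainer all gain an Optional<Database> cx parameter here so block decoding can look up cipher keys when a database is available. A generic sketch of that pattern using std::optional; the types and names below are stand-ins, not FDB's:

#include <iostream>
#include <optional>
#include <string>

struct Database {}; // stand-in for a real handle

// Callers without a database keep the plaintext path; encryption-aware
// callers pass a handle so the decoder could fetch cipher keys.
std::string decodeBlock(const std::string& raw, const std::optional<Database>& cx) {
    if (cx.has_value()) {
        return "decrypted(" + raw + ")"; // placeholder for key lookup + decrypt
    }
    return raw;
}

int main() {
    std::cout << decodeBlock("block", std::nullopt) << std::endl; // legacy call site
    std::cout << decodeBlock("block", Database{}) << std::endl;   // encryption-aware
    return 0;
}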

View File

@ -103,16 +103,15 @@ ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles_impl(st
// Remove .lnk files from results; they are a side effect of a backup that was *read* during simulation. See
// openFile() above for more info on why they are created.
if (g_network->isSimulated())
files.erase(
std::remove_if(files.begin(),
files.end(),
[](std::string const& f) { return StringRef(f).endsWith(LiteralStringRef(".lnk")); }),
files.end());
files.erase(std::remove_if(files.begin(),
files.end(),
[](std::string const& f) { return StringRef(f).endsWith(".lnk"_sr); }),
files.end());
for (const auto& f : files) {
// Hide .part or .temp files.
StringRef s(f);
if (!s.endsWith(LiteralStringRef(".part")) && !s.endsWith(LiteralStringRef(".temp")))
if (!s.endsWith(".part"_sr) && !s.endsWith(".temp"_sr))
results.push_back({ f.substr(m_path.size() + 1), ::fileSize(f) });
}

File diff suppressed because it is too large

View File

@ -30,7 +30,9 @@
#include "flow/Arena.h"
#include "flow/CompressionUtils.h"
#include "flow/DeterministicRandom.h"
#include "flow/EncryptUtils.h"
#include "flow/IRandom.h"
#include "flow/Knobs.h"
#include "flow/Trace.h"
#include "flow/serialize.h"
#include "flow/UnitTest.h"
@ -60,21 +62,6 @@ uint16_t MIN_SUPPORTED_BG_FORMAT_VERSION = 1;
const uint8_t SNAPSHOT_FILE_TYPE = 'S';
const uint8_t DELTA_FILE_TYPE = 'D';
static int getDefaultCompressionLevel(CompressionFilter filter) {
if (filter == CompressionFilter::NONE) {
return -1;
#ifdef ZLIB_LIB_SUPPORTED
} else if (filter == CompressionFilter::GZIP) {
// opt for high-speed compression; larger levels have a high CPU cost and little compression-ratio
// improvement, according to benchmarks
return 1;
#endif
} else {
ASSERT(false);
return -1;
}
}
// Deltas in key order
// For key-ordered delta files, the format for both sets and range clears is that you store boundaries ordered by key.
@ -300,12 +287,13 @@ struct IndexBlockRef {
TraceEvent(SevDebug, "IndexBlockEncrypt_Before").detail("Chksum", chksum);
}
EncryptBlobCipherAes265Ctr encryptor(eKeys.textCipherKey,
eKeys.headerCipherKey,
cipherKeysCtx.ivRef.begin(),
AES_256_IV_LENGTH,
ENCRYPT_HEADER_AUTH_TOKEN_MODE_SINGLE,
BlobCipherMetrics::BLOB_GRANULE);
EncryptBlobCipherAes265Ctr encryptor(
eKeys.textCipherKey,
eKeys.headerCipherKey,
cipherKeysCtx.ivRef.begin(),
AES_256_IV_LENGTH,
getEncryptAuthTokenMode(EncryptAuthTokenMode::ENCRYPT_HEADER_AUTH_TOKEN_MODE_SINGLE),
BlobCipherMetrics::BLOB_GRANULE);
Value serializedBuff = ObjectWriter::toValue(block, IncludeVersion(ProtocolVersion::withBlobGranuleFile()));
BlobCipherEncryptHeader header;
buffer = encryptor.encrypt(serializedBuff.contents().begin(), serializedBuff.contents().size(), &header, arena)
@ -423,12 +411,13 @@ struct IndexBlobGranuleFileChunkRef {
TraceEvent(SevDebug, "BlobChunkEncrypt_Before").detail("Chksum", chksum);
}
EncryptBlobCipherAes265Ctr encryptor(eKeys.textCipherKey,
eKeys.headerCipherKey,
cipherKeysCtx.ivRef.begin(),
AES_256_IV_LENGTH,
ENCRYPT_HEADER_AUTH_TOKEN_MODE_SINGLE,
BlobCipherMetrics::BLOB_GRANULE);
EncryptBlobCipherAes265Ctr encryptor(
eKeys.textCipherKey,
eKeys.headerCipherKey,
cipherKeysCtx.ivRef.begin(),
AES_256_IV_LENGTH,
getEncryptAuthTokenMode(EncryptAuthTokenMode::ENCRYPT_HEADER_AUTH_TOKEN_MODE_SINGLE),
BlobCipherMetrics::BLOB_GRANULE);
BlobCipherEncryptHeader header;
chunkRef.buffer =
encryptor.encrypt(chunkRef.buffer.begin(), chunkRef.buffer.size(), &header, arena)->toStringRef();
@ -475,8 +464,10 @@ struct IndexBlobGranuleFileChunkRef {
const CompressionFilter compFilter,
Arena& arena) {
chunkRef.compressionFilter = compFilter;
chunkRef.buffer = CompressionUtils::compress(
chunkRef.compressionFilter.get(), chunk.contents(), getDefaultCompressionLevel(compFilter), arena);
chunkRef.buffer = CompressionUtils::compress(chunkRef.compressionFilter.get(),
chunk.contents(),
CompressionUtils::getDefaultCompressionLevel(compFilter),
arena);
if (BG_ENCRYPT_COMPRESS_DEBUG) {
XXH64_hash_t chunkChksum = XXH3_64bits(chunk.contents().begin(), chunk.contents().size());
@ -1558,7 +1549,8 @@ ErrorOr<RangeResult> loadAndMaterializeBlobGranules(const Standalone<VectorRef<B
const KeyRangeRef& keyRange,
Version beginVersion,
Version readVersion,
ReadBlobGranuleContext granuleContext) {
ReadBlobGranuleContext granuleContext,
GranuleMaterializeStats& stats) {
int64_t parallelism = granuleContext.granuleParallelism;
if (parallelism < 1) {
parallelism = 1;
@ -1568,6 +1560,8 @@ ErrorOr<RangeResult> loadAndMaterializeBlobGranules(const Standalone<VectorRef<B
}
GranuleLoadIds loadIds[files.size()];
int64_t inputBytes = 0;
int64_t outputBytes = 0;
try {
// Kick off first file reads if parallelism > 1
@ -1592,6 +1586,7 @@ ErrorOr<RangeResult> loadAndMaterializeBlobGranules(const Standalone<VectorRef<B
if (!snapshotData.get().begin()) {
return ErrorOr<RangeResult>(blob_granule_file_load_error());
}
inputBytes += snapshotData.get().size();
}
// +1 to avoid UBSAN variable length array of size zero
@ -1604,18 +1599,25 @@ ErrorOr<RangeResult> loadAndMaterializeBlobGranules(const Standalone<VectorRef<B
if (!deltaData[i].begin()) {
return ErrorOr<RangeResult>(blob_granule_file_load_error());
}
inputBytes += deltaData[i].size();
}
inputBytes += files[chunkIdx].newDeltas.expectedSize();
// materialize rows from chunk
chunkRows =
materializeBlobGranule(files[chunkIdx], keyRange, beginVersion, readVersion, snapshotData, deltaData);
outputBytes += chunkRows.expectedSize();
results.arena().dependsOn(chunkRows.arena());
results.append(results.arena(), chunkRows.begin(), chunkRows.size());
// free once done by forcing FreeHandles to trigger
loadIds[chunkIdx].freeHandles.clear();
}
stats.inputBytes = inputBytes;
stats.outputBytes = outputBytes;
return ErrorOr<RangeResult>(results);
} catch (Error& e) {
return ErrorOr<RangeResult>(e);
@ -1980,7 +1982,7 @@ struct KeyValueGen {
sharedPrefix = sharedPrefix.substr(0, sharedPrefixLen) + "_";
targetValueLength = deterministicRandom()->randomExp(0, 12);
allRange = KeyRangeRef(StringRef(sharedPrefix),
sharedPrefix.size() == 0 ? LiteralStringRef("\xff") : strinc(StringRef(sharedPrefix)));
sharedPrefix.size() == 0 ? "\xff"_sr : strinc(StringRef(sharedPrefix)));
if (deterministicRandom()->coinflip()) {
clearFrequency = 0.0;
@ -2015,11 +2017,7 @@ struct KeyValueGen {
cipherKeys = getCipherKeysCtx(ar);
}
if (deterministicRandom()->coinflip()) {
#ifdef ZLIB_LIB_SUPPORTED
compressFilter = CompressionFilter::GZIP;
#else
compressFilter = CompressionFilter::NONE;
#endif
compressFilter = CompressionUtils::getRandomFilter();
}
}
@ -2199,10 +2197,8 @@ TEST_CASE("/blobgranule/files/validateEncryptionCompression") {
BlobGranuleCipherKeysCtx cipherKeys = getCipherKeysCtx(ar);
std::vector<bool> encryptionModes = { false, true };
std::vector<Optional<CompressionFilter>> compressionModes;
compressionModes.push_back({});
#ifdef ZLIB_LIB_SUPPORTED
compressionModes.push_back(CompressionFilter::GZIP);
#endif
compressionModes.insert(
compressionModes.end(), CompressionUtils::supportedFilters.begin(), CompressionUtils::supportedFilters.end());
std::vector<Value> snapshotValues;
for (bool encryptionMode : encryptionModes) {
@ -2299,9 +2295,9 @@ TEST_CASE("/blobgranule/files/snapshotFormatUnitTest") {
}
checkSnapshotEmpty(serialized, normalKeys.begin, data.front().key, kvGen.cipherKeys);
checkSnapshotEmpty(serialized, normalKeys.begin, LiteralStringRef("\x00"), kvGen.cipherKeys);
checkSnapshotEmpty(serialized, normalKeys.begin, "\x00"_sr, kvGen.cipherKeys);
checkSnapshotEmpty(serialized, keyAfter(data.back().key), normalKeys.end, kvGen.cipherKeys);
checkSnapshotEmpty(serialized, LiteralStringRef("\xfe"), normalKeys.end, kvGen.cipherKeys);
checkSnapshotEmpty(serialized, "\xfe"_sr, normalKeys.end, kvGen.cipherKeys);
fmt::print("Snapshot format test done!\n");
@ -2916,9 +2912,8 @@ TEST_CASE("!/blobgranule/files/benchFromFiles") {
std::vector<bool> encryptionModes = { false, true };
std::vector<Optional<CompressionFilter>> compressionModes;
compressionModes.push_back({});
#ifdef ZLIB_LIB_SUPPORTED
compressionModes.push_back(CompressionFilter::GZIP);
#endif
compressionModes.insert(
compressionModes.end(), CompressionUtils::supportedFilters.begin(), CompressionUtils::supportedFilters.end());
std::vector<std::string> runNames = { "logical" };
std::vector<std::pair<int64_t, double>> snapshotMetrics;
@ -2948,6 +2943,10 @@ TEST_CASE("!/blobgranule/files/benchFromFiles") {
if (!chunk && compressionFilter.present()) {
continue;
}
if (compressionFilter.present() && CompressionFilter::NONE == compressionFilter.get()) {
continue;
}
std::string name;
if (!chunk) {
name = "old";
@ -3020,11 +3019,15 @@ TEST_CASE("!/blobgranule/files/benchFromFiles") {
if (!chunk && encrypt) {
continue;
}
Optional<BlobGranuleCipherKeysCtx> keys = encrypt ? cipherKeys : Optional<BlobGranuleCipherKeysCtx>();
for (auto& compressionFilter : compressionModes) {
if (!chunk && compressionFilter.present()) {
continue;
}
if (compressionFilter.present() && CompressionFilter::NONE == compressionFilter.get()) {
continue;
}
std::string name;
if (!chunk) {
name = "old";

View File

@ -167,7 +167,7 @@ TEST_CASE("/fdbserver/blobgranule/isRangeCoveredByBlob") {
}
// check '' to \xff
{ ASSERT(isRangeFullyCovered(KeyRangeRef(LiteralStringRef(""), LiteralStringRef("\xff")), chunks) == false); }
{ ASSERT(isRangeFullyCovered(KeyRangeRef(""_sr, "\xff"_sr), chunks) == false); }
// check {key_a1, key_a9}
{ ASSERT(isRangeFullyCovered(KeyRangeRef("key_a1"_sr, "key_a9"_sr), chunks)); }

View File

@ -264,12 +264,13 @@ void ClientKnobs::initialize(Randomize randomize) {
init( MAX_TAGS_PER_TRANSACTION, 5 );
init( MAX_TRANSACTION_TAG_LENGTH, 16 );
init( COMMIT_SAMPLE_COST, 100 ); if( randomize && BUGGIFY ) COMMIT_SAMPLE_COST = 10;
init( WRITE_COST_BYTE_FACTOR, 16384 ); if( randomize && BUGGIFY ) WRITE_COST_BYTE_FACTOR = 4096;
init( INCOMPLETE_SHARD_PLUS, 4096 );
init( READ_TAG_SAMPLE_RATE, 0.01 ); if( randomize && BUGGIFY ) READ_TAG_SAMPLE_RATE = 1.0; // Communicated to clients from cluster
init( TAG_THROTTLE_SMOOTHING_WINDOW, 2.0 );
init( TAG_THROTTLE_RECHECK_INTERVAL, 5.0 ); if( randomize && BUGGIFY ) TAG_THROTTLE_RECHECK_INTERVAL = 0.0;
init( TAG_THROTTLE_EXPIRATION_INTERVAL, 60.0 ); if( randomize && BUGGIFY ) TAG_THROTTLE_EXPIRATION_INTERVAL = 1.0;
init( WRITE_COST_BYTE_FACTOR, 16384 ); if( randomize && BUGGIFY ) WRITE_COST_BYTE_FACTOR = 4096;
init( READ_COST_BYTE_FACTOR, 16384 ); if( randomize && BUGGIFY ) READ_COST_BYTE_FACTOR = 4096;
// busyness reporting
init( BUSYNESS_SPIKE_START_THRESHOLD, 0.100 );

View File

@ -37,11 +37,11 @@
#include "flow/actorcompiler.h" // has to be last include
const Key DatabaseBackupAgent::keyAddPrefix = LiteralStringRef("add_prefix");
const Key DatabaseBackupAgent::keyRemovePrefix = LiteralStringRef("remove_prefix");
const Key DatabaseBackupAgent::keyRangeVersions = LiteralStringRef("range_versions");
const Key DatabaseBackupAgent::keyCopyStop = LiteralStringRef("copy_stop");
const Key DatabaseBackupAgent::keyDatabasesInSync = LiteralStringRef("databases_in_sync");
const Key DatabaseBackupAgent::keyAddPrefix = "add_prefix"_sr;
const Key DatabaseBackupAgent::keyRemovePrefix = "remove_prefix"_sr;
const Key DatabaseBackupAgent::keyRangeVersions = "range_versions"_sr;
const Key DatabaseBackupAgent::keyCopyStop = "copy_stop"_sr;
const Key DatabaseBackupAgent::keyDatabasesInSync = "databases_in_sync"_sr;
const int DatabaseBackupAgent::LATEST_DR_VERSION = 1;
DatabaseBackupAgent::DatabaseBackupAgent()
@ -75,8 +75,7 @@ DatabaseBackupAgent::DatabaseBackupAgent(Database src)
class DRConfig {
public:
DRConfig(UID uid = UID())
: uid(uid),
configSpace(uidPrefixKey(LiteralStringRef("uid->config/").withPrefix(databaseBackupPrefixRange.begin), uid)) {}
: uid(uid), configSpace(uidPrefixKey("uid->config/"_sr.withPrefix(databaseBackupPrefixRange.begin), uid)) {}
DRConfig(Reference<Task> task)
: DRConfig(BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())) {}
@ -203,7 +202,7 @@ struct BackupRangeTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
ACTOR static Future<Void> _execute(Database cx,
@ -405,10 +404,10 @@ struct BackupRangeTaskFunc : TaskFuncBase {
break;
if (backupVersions.get()[versionLoc + 1].key ==
(removePrefix == StringRef() ? normalKeys.end : strinc(removePrefix))) {
(removePrefix == StringRef() ? allKeys.end : strinc(removePrefix))) {
tr->clear(KeyRangeRef(
backupVersions.get()[versionLoc].key.removePrefix(removePrefix).withPrefix(addPrefix),
addPrefix == StringRef() ? normalKeys.end : strinc(addPrefix)));
addPrefix == StringRef() ? allKeys.end : strinc(addPrefix)));
} else {
tr->clear(KeyRangeRef(backupVersions.get()[versionLoc].key,
backupVersions.get()[versionLoc + 1].key)
@ -536,9 +535,9 @@ struct BackupRangeTaskFunc : TaskFuncBase {
return Void();
}
};
StringRef BackupRangeTaskFunc::name = LiteralStringRef("dr_backup_range");
const Key BackupRangeTaskFunc::keyAddBackupRangeTasks = LiteralStringRef("addBackupRangeTasks");
const Key BackupRangeTaskFunc::keyBackupRangeBeginKey = LiteralStringRef("backupRangeBeginKey");
StringRef BackupRangeTaskFunc::name = "dr_backup_range"_sr;
const Key BackupRangeTaskFunc::keyAddBackupRangeTasks = "addBackupRangeTasks"_sr;
const Key BackupRangeTaskFunc::keyBackupRangeBeginKey = "backupRangeBeginKey"_sr;
REGISTER_TASKFUNC(BackupRangeTaskFunc);
struct FinishFullBackupTaskFunc : TaskFuncBase {
@ -588,7 +587,7 @@ struct FinishFullBackupTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
StringRef getName() const override { return name; };
@ -606,7 +605,7 @@ struct FinishFullBackupTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef FinishFullBackupTaskFunc::name = LiteralStringRef("dr_finish_full_backup");
StringRef FinishFullBackupTaskFunc::name = "dr_finish_full_backup"_sr;
REGISTER_TASKFUNC(FinishFullBackupTaskFunc);
struct EraseLogRangeTaskFunc : TaskFuncBase {
@ -683,7 +682,7 @@ struct EraseLogRangeTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr,
@ -697,7 +696,7 @@ struct EraseLogRangeTaskFunc : TaskFuncBase {
return Void();
}
};
StringRef EraseLogRangeTaskFunc::name = LiteralStringRef("dr_erase_log_range");
StringRef EraseLogRangeTaskFunc::name = "dr_erase_log_range"_sr;
REGISTER_TASKFUNC(EraseLogRangeTaskFunc);
struct CopyLogRangeTaskFunc : TaskFuncBase {
@ -958,7 +957,7 @@ struct CopyLogRangeTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr,
@ -989,8 +988,8 @@ struct CopyLogRangeTaskFunc : TaskFuncBase {
return Void();
}
};
StringRef CopyLogRangeTaskFunc::name = LiteralStringRef("dr_copy_log_range");
const Key CopyLogRangeTaskFunc::keyNextBeginVersion = LiteralStringRef("nextBeginVersion");
StringRef CopyLogRangeTaskFunc::name = "dr_copy_log_range"_sr;
const Key CopyLogRangeTaskFunc::keyNextBeginVersion = "nextBeginVersion"_sr;
REGISTER_TASKFUNC(CopyLogRangeTaskFunc);
struct CopyLogsTaskFunc : TaskFuncBase {
@ -1125,7 +1124,7 @@ struct CopyLogsTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
StringRef getName() const override { return name; };
@ -1143,7 +1142,7 @@ struct CopyLogsTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef CopyLogsTaskFunc::name = LiteralStringRef("dr_copy_logs");
StringRef CopyLogsTaskFunc::name = "dr_copy_logs"_sr;
REGISTER_TASKFUNC(CopyLogsTaskFunc);
struct FinishedFullBackupTaskFunc : TaskFuncBase {
@ -1235,7 +1234,7 @@ struct FinishedFullBackupTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr,
@ -1283,8 +1282,8 @@ struct FinishedFullBackupTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef FinishedFullBackupTaskFunc::name = LiteralStringRef("dr_finished_full_backup");
const Key FinishedFullBackupTaskFunc::keyInsertTask = LiteralStringRef("insertTask");
StringRef FinishedFullBackupTaskFunc::name = "dr_finished_full_backup"_sr;
const Key FinishedFullBackupTaskFunc::keyInsertTask = "insertTask"_sr;
REGISTER_TASKFUNC(FinishedFullBackupTaskFunc);
struct CopyDiffLogsTaskFunc : TaskFuncBase {
@ -1396,7 +1395,7 @@ struct CopyDiffLogsTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
StringRef getName() const override { return name; };
@ -1414,7 +1413,7 @@ struct CopyDiffLogsTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef CopyDiffLogsTaskFunc::name = LiteralStringRef("dr_copy_diff_logs");
StringRef CopyDiffLogsTaskFunc::name = "dr_copy_diff_logs"_sr;
REGISTER_TASKFUNC(CopyDiffLogsTaskFunc);
// Skip unneeded EraseLogRangeTaskFunc in 5.1
@ -1446,7 +1445,7 @@ struct SkipOldEraseLogRangeTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef SkipOldEraseLogRangeTaskFunc::name = LiteralStringRef("dr_skip_legacy_task");
StringRef SkipOldEraseLogRangeTaskFunc::name = "dr_skip_legacy_task"_sr;
REGISTER_TASKFUNC(SkipOldEraseLogRangeTaskFunc);
REGISTER_TASKFUNC_ALIAS(SkipOldEraseLogRangeTaskFunc, db_erase_log_range);
@ -1652,7 +1651,7 @@ struct OldCopyLogRangeTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr,
@ -1683,8 +1682,8 @@ struct OldCopyLogRangeTaskFunc : TaskFuncBase {
return Void();
}
};
StringRef OldCopyLogRangeTaskFunc::name = LiteralStringRef("db_copy_log_range");
const Key OldCopyLogRangeTaskFunc::keyNextBeginVersion = LiteralStringRef("nextBeginVersion");
StringRef OldCopyLogRangeTaskFunc::name = "db_copy_log_range"_sr;
const Key OldCopyLogRangeTaskFunc::keyNextBeginVersion = "nextBeginVersion"_sr;
REGISTER_TASKFUNC(OldCopyLogRangeTaskFunc);
struct AbortOldBackupTaskFunc : TaskFuncBase {
@ -1753,7 +1752,7 @@ struct AbortOldBackupTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
StringRef getName() const override { return name; };
@ -1771,7 +1770,7 @@ struct AbortOldBackupTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef AbortOldBackupTaskFunc::name = LiteralStringRef("dr_abort_legacy_backup");
StringRef AbortOldBackupTaskFunc::name = "dr_abort_legacy_backup"_sr;
REGISTER_TASKFUNC(AbortOldBackupTaskFunc);
REGISTER_TASKFUNC_ALIAS(AbortOldBackupTaskFunc, db_backup_range);
REGISTER_TASKFUNC_ALIAS(AbortOldBackupTaskFunc, db_finish_full_backup);
@ -1834,13 +1833,16 @@ struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase {
return Void();
}
if (backupRanges.size() == 1) {
if (backupRanges.size() == 1 || isDefaultBackup(backupRanges)) {
RangeResult existingDestUidValues = wait(srcTr->getRange(
KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY));
bool found = false;
KeyRangeRef targetRange =
(backupRanges.size() == 1) ? backupRanges[0] : getDefaultBackupSharedRange();
for (auto it : existingDestUidValues) {
if (BinaryReader::fromStringRef<KeyRange>(it.key.removePrefix(destUidLookupPrefix),
IncludeVersion()) == backupRanges[0]) {
KeyRange uidRange = BinaryReader::fromStringRef<KeyRange>(
it.key.removePrefix(destUidLookupPrefix), IncludeVersion());
if (uidRange == targetRange) {
if (destUidValue != it.value) {
// existing backup/DR is running
return Void();
@ -1856,7 +1858,7 @@ struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase {
}
srcTr->set(
BinaryWriter::toValue(backupRanges[0], IncludeVersion(ProtocolVersion::withSharedMutations()))
BinaryWriter::toValue(targetRange, IncludeVersion(ProtocolVersion::withSharedMutations()))
.withPrefix(destUidLookupPrefix),
destUidValue);
}
@ -1918,7 +1920,7 @@ struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef CopyDiffLogsUpgradeTaskFunc::name = LiteralStringRef("db_copy_diff_logs");
StringRef CopyDiffLogsUpgradeTaskFunc::name = "db_copy_diff_logs"_sr;
REGISTER_TASKFUNC(CopyDiffLogsUpgradeTaskFunc);
struct BackupRestorableTaskFunc : TaskFuncBase {
@ -2031,7 +2033,7 @@ struct BackupRestorableTaskFunc : TaskFuncBase {
task,
parentTask->params[Task::reservedTaskParamValidKey],
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
StringRef getName() const override { return name; };
@ -2049,7 +2051,7 @@ struct BackupRestorableTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef BackupRestorableTaskFunc::name = LiteralStringRef("dr_backup_restorable");
StringRef BackupRestorableTaskFunc::name = "dr_backup_restorable"_sr;
REGISTER_TASKFUNC(BackupRestorableTaskFunc);
struct StartFullBackupTaskFunc : TaskFuncBase {
@ -2078,24 +2080,29 @@ struct StartFullBackupTaskFunc : TaskFuncBase {
srcTr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
// Initialize destUid
if (backupRanges.size() == 1) {
if (backupRanges.size() == 1 || isDefaultBackup(backupRanges)) {
RangeResult existingDestUidValues = wait(srcTr->getRange(
KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY));
KeyRangeRef targetRange =
(backupRanges.size() == 1) ? backupRanges[0] : getDefaultBackupSharedRange();
bool found = false;
for (auto it : existingDestUidValues) {
if (BinaryReader::fromStringRef<KeyRange>(it.key.removePrefix(destUidLookupPrefix),
IncludeVersion()) == backupRanges[0]) {
KeyRange uidRange = BinaryReader::fromStringRef<KeyRange>(
it.key.removePrefix(destUidLookupPrefix), IncludeVersion());
if (uidRange == targetRange) {
destUidValue = it.value;
found = true;
CODE_PROBE(targetRange == getDefaultBackupSharedRange(),
"DR mutation sharing with default backup");
break;
}
}
if (!found) {
destUidValue = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
srcTr->set(BinaryWriter::toValue(backupRanges[0],
IncludeVersion(ProtocolVersion::withSharedMutations()))
.withPrefix(destUidLookupPrefix),
destUidValue);
srcTr->set(
BinaryWriter::toValue(targetRange, IncludeVersion(ProtocolVersion::withSharedMutations()))
.withPrefix(destUidLookupPrefix),
destUidValue);
}
}
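
This hunk and the matching CopyDiffLogsUpgradeTaskFunc hunk above widen the destUid lookup from exactly one backup range to one range or the default shared range, so jobs covering the same target range reuse a single destination UID and thereby share mutation logs. A hedged sketch of the lookup-or-create pattern, with illustrative types standing in for the transaction and key machinery:

// Hedged sketch of the destUid reuse pattern: map a serialized target range
// to a destination UID, reusing an existing entry so concurrent backups/DRs
// over the same range share mutation logs. Types and names are illustrative.
#include <map>
#include <random>
#include <string>

using RangeKey = std::string; // stands in for the serialized KeyRange
using DestUid = std::string;

DestUid findOrCreateDestUid(std::map<RangeKey, DestUid>& destUidLookup, const RangeKey& targetRange) {
    auto it = destUidLookup.find(targetRange);
    if (it != destUidLookup.end())
        return it->second; // reuse: share mutation logs with the existing job
    std::mt19937_64 rng{ std::random_device{}() };
    DestUid fresh = std::to_string(rng()); // stands in for randomUniqueID()
    destUidLookup.emplace(targetRange, fresh);
    return fresh;
}
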
@ -2281,7 +2288,7 @@ struct StartFullBackupTaskFunc : TaskFuncBase {
task->params[BackupAgentBase::keyConfigBackupRanges] = keyConfigBackupRanges;
task->params[BackupAgentBase::keyTagName] = tagName;
task->params[DatabaseBackupAgent::keyDatabasesInSync] =
backupAction == DatabaseBackupAgent::PreBackupAction::NONE ? LiteralStringRef("t") : LiteralStringRef("f");
backupAction == DatabaseBackupAgent::PreBackupAction::NONE ? "t"_sr : "f"_sr;
if (!waitFor) {
return taskBucket->addTask(tr,
@ -2301,7 +2308,7 @@ struct StartFullBackupTaskFunc : TaskFuncBase {
.get(logUid)
.pack(BackupAgentBase::keyFolderId),
task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
return "OnSetAddTask"_sr;
}
StringRef getName() const override { return name; };
@ -2319,7 +2326,7 @@ struct StartFullBackupTaskFunc : TaskFuncBase {
return _finish(tr, tb, fb, task);
};
};
StringRef StartFullBackupTaskFunc::name = LiteralStringRef("dr_start_full_backup");
StringRef StartFullBackupTaskFunc::name = "dr_start_full_backup"_sr;
REGISTER_TASKFUNC(StartFullBackupTaskFunc);
} // namespace dbBackup
@ -2625,7 +2632,7 @@ public:
int64_t startCount = 0;
state Key mapPrefix = logUidValue.withPrefix(applyMutationsKeyVersionMapRange.begin);
Key mapEnd = normalKeys.end.withPrefix(mapPrefix);
Key mapEnd = allKeys.end.withPrefix(mapPrefix);
tr->set(logUidValue.withPrefix(applyMutationsAddPrefixRange.begin), addPrefix);
tr->set(logUidValue.withPrefix(applyMutationsRemovePrefixRange.begin), removePrefix);
tr->set(logUidValue.withPrefix(applyMutationsKeyVersionCountRange.begin), StringRef((uint8_t*)&startCount, 8));
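
The normalKeys -> allKeys substitutions here and in BackupRangeTaskFunc above widen a fallback end bound from the user keyspace to the full keyspace, presumably so keys under the prefix that themselves begin with \xff are still covered. The assumed bounds, sketched with a stand-in type (the exact values are an assumption based on the conventional FoundationDB definitions):

// Assumed keyspace bounds behind the normalKeys -> allKeys change; the
// struct is a stand-in for KeyRangeRef and the values are assumptions.
#include <string>

struct KeyRangeSketch {
    std::string begin, end;
};

const KeyRangeSketch normalKeys{ "", "\xff" };   // user keyspace only
const KeyRangeSketch allKeys{ "", "\xff\xff" };  // user plus system ("\xff"-prefixed) keyspace
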
@ -3061,6 +3068,9 @@ public:
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
wait(success(tr->getReadVersion())); // get the read version before getting a version from the source
// database to prevent the time differential from going negative
@ -3072,9 +3082,6 @@ public:
state UID logUid = wait(backupAgent->getLogUid(tr, tagName));
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Future<Optional<Value>> fPaused = tr->get(backupAgent->taskBucket->getPauseKey());
state Future<RangeResult> fErrorValues =
errorLimit > 0

View File

@ -560,38 +560,38 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
KeyRef ck = key.removePrefix(configKeysPrefix);
int type;
if (ck == LiteralStringRef("initialized")) {
if (ck == "initialized"_sr) {
initialized = true;
} else if (ck == LiteralStringRef("commit_proxies")) {
} else if (ck == "commit_proxies"_sr) {
commitProxyCount = toInt(value);
if (commitProxyCount == -1)
overwriteProxiesCount();
} else if (ck == LiteralStringRef("grv_proxies")) {
} else if (ck == "grv_proxies"_sr) {
grvProxyCount = toInt(value);
if (grvProxyCount == -1)
overwriteProxiesCount();
} else if (ck == LiteralStringRef("resolvers")) {
} else if (ck == "resolvers"_sr) {
parse(&resolverCount, value);
} else if (ck == LiteralStringRef("logs")) {
} else if (ck == "logs"_sr) {
parse(&desiredTLogCount, value);
} else if (ck == LiteralStringRef("log_replicas")) {
} else if (ck == "log_replicas"_sr) {
parse(&tLogReplicationFactor, value);
tLogWriteAntiQuorum = std::min(tLogWriteAntiQuorum, tLogReplicationFactor / 2);
} else if (ck == LiteralStringRef("log_anti_quorum")) {
} else if (ck == "log_anti_quorum"_sr) {
parse(&tLogWriteAntiQuorum, value);
if (tLogReplicationFactor > 0) {
tLogWriteAntiQuorum = std::min(tLogWriteAntiQuorum, tLogReplicationFactor / 2);
}
} else if (ck == LiteralStringRef("storage_replicas")) {
} else if (ck == "storage_replicas"_sr) {
parse(&storageTeamSize, value);
} else if (ck == LiteralStringRef("tss_count")) {
} else if (ck == "tss_count"_sr) {
parse(&desiredTSSCount, value);
} else if (ck == LiteralStringRef("log_version")) {
} else if (ck == "log_version"_sr) {
parse((&type), value);
type = std::max((int)TLogVersion::MIN_RECRUITABLE, type);
type = std::min((int)TLogVersion::MAX_SUPPORTED, type);
tLogVersion = (TLogVersion::Version)type;
} else if (ck == LiteralStringRef("log_engine")) {
} else if (ck == "log_engine"_sr) {
parse((&type), value);
tLogDataStoreType = (KeyValueStoreType::StoreType)type;
// TODO: Remove this once Redwood works as a log engine
@ -602,62 +602,62 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
if (tLogDataStoreType == KeyValueStoreType::MEMORY_RADIXTREE) {
tLogDataStoreType = KeyValueStoreType::SSD_BTREE_V2;
}
} else if (ck == LiteralStringRef("log_spill")) {
} else if (ck == "log_spill"_sr) {
parse((&type), value);
tLogSpillType = (TLogSpillType::SpillType)type;
} else if (ck == LiteralStringRef("storage_engine")) {
} else if (ck == "storage_engine"_sr) {
parse((&type), value);
storageServerStoreType = (KeyValueStoreType::StoreType)type;
} else if (ck == LiteralStringRef("tss_storage_engine")) {
} else if (ck == "tss_storage_engine"_sr) {
parse((&type), value);
testingStorageServerStoreType = (KeyValueStoreType::StoreType)type;
} else if (ck == LiteralStringRef("auto_commit_proxies")) {
} else if (ck == "auto_commit_proxies"_sr) {
parse(&autoCommitProxyCount, value);
} else if (ck == LiteralStringRef("auto_grv_proxies")) {
} else if (ck == "auto_grv_proxies"_sr) {
parse(&autoGrvProxyCount, value);
} else if (ck == LiteralStringRef("auto_resolvers")) {
} else if (ck == "auto_resolvers"_sr) {
parse(&autoResolverCount, value);
} else if (ck == LiteralStringRef("auto_logs")) {
} else if (ck == "auto_logs"_sr) {
parse(&autoDesiredTLogCount, value);
} else if (ck == LiteralStringRef("storage_replication_policy")) {
} else if (ck == "storage_replication_policy"_sr) {
parseReplicationPolicy(&storagePolicy, value);
} else if (ck == LiteralStringRef("log_replication_policy")) {
} else if (ck == "log_replication_policy"_sr) {
parseReplicationPolicy(&tLogPolicy, value);
} else if (ck == LiteralStringRef("log_routers")) {
} else if (ck == "log_routers"_sr) {
parse(&desiredLogRouterCount, value);
} else if (ck == LiteralStringRef("remote_logs")) {
} else if (ck == "remote_logs"_sr) {
parse(&remoteDesiredTLogCount, value);
} else if (ck == LiteralStringRef("remote_log_replicas")) {
} else if (ck == "remote_log_replicas"_sr) {
parse(&remoteTLogReplicationFactor, value);
} else if (ck == LiteralStringRef("remote_log_policy")) {
} else if (ck == "remote_log_policy"_sr) {
parseReplicationPolicy(&remoteTLogPolicy, value);
} else if (ck == LiteralStringRef("backup_worker_enabled")) {
} else if (ck == "backup_worker_enabled"_sr) {
parse((&type), value);
backupWorkerEnabled = (type != 0);
} else if (ck == LiteralStringRef("usable_regions")) {
} else if (ck == "usable_regions"_sr) {
parse(&usableRegions, value);
} else if (ck == LiteralStringRef("repopulate_anti_quorum")) {
} else if (ck == "repopulate_anti_quorum"_sr) {
parse(&repopulateRegionAntiQuorum, value);
} else if (ck == LiteralStringRef("regions")) {
} else if (ck == "regions"_sr) {
parse(&regions, value);
} else if (ck == LiteralStringRef("perpetual_storage_wiggle")) {
} else if (ck == "perpetual_storage_wiggle"_sr) {
parse(&perpetualStorageWiggleSpeed, value);
} else if (ck == LiteralStringRef("perpetual_storage_wiggle_locality")) {
} else if (ck == "perpetual_storage_wiggle_locality"_sr) {
if (!isValidPerpetualStorageWiggleLocality(value.toString())) {
return false;
}
perpetualStorageWiggleLocality = value.toString();
} else if (ck == LiteralStringRef("storage_migration_type")) {
} else if (ck == "storage_migration_type"_sr) {
parse((&type), value);
storageMigrationType = (StorageMigrationType::MigrationType)type;
} else if (ck == LiteralStringRef("tenant_mode")) {
} else if (ck == "tenant_mode"_sr) {
tenantMode = TenantMode::fromValue(value);
} else if (ck == LiteralStringRef("proxies")) {
} else if (ck == "proxies"_sr) {
overwriteProxiesCount();
} else if (ck == LiteralStringRef("blob_granules_enabled")) {
} else if (ck == "blob_granules_enabled"_sr) {
parse((&type), value);
blobGranulesEnabled = (type != 0);
} else if (ck == LiteralStringRef("encryption_at_rest_mode")) {
} else if (ck == "encryption_at_rest_mode"_sr) {
encryptionAtRestMode = EncryptionAtRestMode::fromValue(value);
} else {
return false;
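
One interaction worth calling out from this function: the log_anti_quorum clamp earlier keeps the write anti-quorum at no more than half the log replication factor, and the clamp is re-applied whenever either knob changes. A quick worked example with illustrative values:

// Worked example of the anti-quorum clamp (values illustrative): setting
// log_replicas = 3 after requesting log_anti_quorum = 2 re-clamps the
// anti-quorum to 3 / 2 = 1, so at most half the tLogs can be acked around.
#include <algorithm>
#include <cassert>

int main() {
    int tLogReplicationFactor = 3;
    int tLogWriteAntiQuorum = 2; // requested
    tLogWriteAntiQuorum = std::min(tLogWriteAntiQuorum, tLogReplicationFactor / 2);
    assert(tLogWriteAntiQuorum == 1); // clamped
    return 0;
}
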

File diff suppressed because it is too large

View File

@ -28,14 +28,14 @@
#include "flow/actorcompiler.h" // This must be the last #include.
const KeyRef fdbClientInfoTxnSampleRate = LiteralStringRef("config/fdb_client_info/client_txn_sample_rate");
const KeyRef fdbClientInfoTxnSizeLimit = LiteralStringRef("config/fdb_client_info/client_txn_size_limit");
const KeyRef fdbClientInfoTxnSampleRate = "config/fdb_client_info/client_txn_sample_rate"_sr;
const KeyRef fdbClientInfoTxnSizeLimit = "config/fdb_client_info/client_txn_size_limit"_sr;
const KeyRef transactionTagSampleRate = LiteralStringRef("config/transaction_tag_sample_rate");
const KeyRef transactionTagSampleCost = LiteralStringRef("config/transaction_tag_sample_cost");
const KeyRef transactionTagSampleRate = "config/transaction_tag_sample_rate"_sr;
const KeyRef transactionTagSampleCost = "config/transaction_tag_sample_cost"_sr;
const KeyRef samplingFrequency = LiteralStringRef("visibility/sampling/frequency");
const KeyRef samplingWindow = LiteralStringRef("visibility/sampling/window");
const KeyRef samplingFrequency = "visibility/sampling/frequency"_sr;
const KeyRef samplingWindow = "visibility/sampling/window"_sr;
GlobalConfig::GlobalConfig(DatabaseContext* cx) : cx(cx), lastUpdate(0) {}
@ -62,7 +62,7 @@ void GlobalConfig::applyChanges(Transaction& tr,
// Write version key to trigger update in cluster controller.
tr.atomicOp(globalConfigVersionKey,
LiteralStringRef("0123456789\x00\x00\x00\x00"), // versionstamp
"0123456789\x00\x00\x00\x00"_sr, // versionstamp
MutationRef::SetVersionstampedValue);
}
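
The value passed to the atomicOp above, and to the similar lockDatabase calls later in this diff, follows what is, by assumption, the standard FDB versionstamped-value layout: ten placeholder bytes that commit overwrites with the transaction's versionstamp, then a 4-byte little-endian offset locating the placeholder (0 here). A hedged sketch of building such a value:

// Hedged sketch of a versionstamped value, assuming the usual FDB atomic-op
// layout: 10 placeholder bytes replaced at commit, followed by a 4-byte
// little-endian offset saying where the placeholder starts.
#include <cstdint>
#include <string>

std::string makeVersionstampedValue(uint32_t placeholderOffset = 0) {
    std::string value(10, '\0'); // placeholder bytes, e.g. "0123456789" in the diff
    unsigned char offsetLE[4];
    for (int i = 0; i < 4; ++i)
        offsetLE[i] = static_cast<unsigned char>((placeholderOffset >> (8 * i)) & 0xff);
    value.append(reinterpret_cast<char*>(offsetLE), 4); // trailing offset bytes
    return value;
}
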

View File

@ -246,7 +246,7 @@ static Future<Void> krmSetRangeCoalescing_(Transaction* tr,
// Determine how far to extend this range at the beginning
auto beginRange = keys[0].get();
bool hasBegin = beginRange.size() > 0 && beginRange[0].key.startsWith(mapPrefix);
Value beginValue = hasBegin ? beginRange[0].value : LiteralStringRef("");
Value beginValue = hasBegin ? beginRange[0].value : ""_sr;
state Key beginKey = withPrefix.begin;
if (beginValue == value) {
@ -259,7 +259,7 @@ static Future<Void> krmSetRangeCoalescing_(Transaction* tr,
bool hasEnd = endRange.size() >= 1 && endRange[0].key.startsWith(mapPrefix) && endRange[0].key <= withPrefix.end;
bool hasNext = (endRange.size() == 2 && endRange[1].key.startsWith(mapPrefix)) ||
(endRange.size() == 1 && withPrefix.end < endRange[0].key && endRange[0].key.startsWith(mapPrefix));
Value existingValue = hasEnd ? endRange[0].value : LiteralStringRef("");
Value existingValue = hasEnd ? endRange[0].value : ""_sr;
bool valueMatches = value == existingValue;
KeyRange conflictRange = KeyRangeRef(hasBegin ? beginRange[0].key : mapPrefix, withPrefix.begin);
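
The coalescing logic above extends a write so that adjacent map entries already carrying the same value merge with it. A hedged sketch of the idea on a toy range map, where each boundary key maps to the value in effect until the next boundary (names and types are illustrative):

// Hedged sketch of range coalescing: setting [begin, end) to `value` erases
// interior boundaries and skips redundant ones whose neighbors already hold
// the same value. An absent boundary is treated as the empty-string default.
#include <map>
#include <string>

void setRangeCoalescing(std::map<std::string, std::string>& rangeMap,
                        const std::string& begin, const std::string& end,
                        const std::string& value) {
    // value in effect just after `end`, so the tail keeps its old meaning
    auto after = rangeMap.upper_bound(end);
    std::string tailValue = after == rangeMap.begin() ? "" : std::prev(after)->second;
    rangeMap.erase(rangeMap.lower_bound(begin), after); // clear interior boundaries
    // coalesce left: skip the begin boundary if the preceding range matches
    auto left = rangeMap.lower_bound(begin);
    std::string leftValue = left == rangeMap.begin() ? "" : std::prev(left)->second;
    if (leftValue != value)
        rangeMap[begin] = value;
    // coalesce right: only restore the tail boundary if its value differs
    if (tailValue != value)
        rangeMap[end] = tailValue;
}
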
@ -317,20 +317,20 @@ Future<Void> krmSetRangeCoalescing(Reference<ReadYourWritesTransaction> const& t
TEST_CASE("/keyrangemap/decoderange/aligned") {
Arena arena;
Key prefix = LiteralStringRef("/prefix/");
StringRef fullKeyA = StringRef(arena, LiteralStringRef("/prefix/a"));
StringRef fullKeyB = StringRef(arena, LiteralStringRef("/prefix/b"));
StringRef fullKeyC = StringRef(arena, LiteralStringRef("/prefix/c"));
StringRef fullKeyD = StringRef(arena, LiteralStringRef("/prefix/d"));
Key prefix = "/prefix/"_sr;
StringRef fullKeyA = StringRef(arena, "/prefix/a"_sr);
StringRef fullKeyB = StringRef(arena, "/prefix/b"_sr);
StringRef fullKeyC = StringRef(arena, "/prefix/c"_sr);
StringRef fullKeyD = StringRef(arena, "/prefix/d"_sr);
StringRef keyA = StringRef(arena, LiteralStringRef("a"));
StringRef keyB = StringRef(arena, LiteralStringRef("b"));
StringRef keyC = StringRef(arena, LiteralStringRef("c"));
StringRef keyD = StringRef(arena, LiteralStringRef("d"));
StringRef keyE = StringRef(arena, LiteralStringRef("e"));
StringRef keyAB = StringRef(arena, LiteralStringRef("ab"));
StringRef keyAC = StringRef(arena, LiteralStringRef("ac"));
StringRef keyCD = StringRef(arena, LiteralStringRef("cd"));
StringRef keyA = StringRef(arena, "a"_sr);
StringRef keyB = StringRef(arena, "b"_sr);
StringRef keyC = StringRef(arena, "c"_sr);
StringRef keyD = StringRef(arena, "d"_sr);
StringRef keyE = StringRef(arena, "e"_sr);
StringRef keyAB = StringRef(arena, "ab"_sr);
StringRef keyAC = StringRef(arena, "ac"_sr);
StringRef keyCD = StringRef(arena, "cd"_sr);
// Fake getRange() call.
RangeResult kv;
@ -369,20 +369,20 @@ TEST_CASE("/keyrangemap/decoderange/aligned") {
TEST_CASE("/keyrangemap/decoderange/unaligned") {
Arena arena;
Key prefix = LiteralStringRef("/prefix/");
StringRef fullKeyA = StringRef(arena, LiteralStringRef("/prefix/a"));
StringRef fullKeyB = StringRef(arena, LiteralStringRef("/prefix/b"));
StringRef fullKeyC = StringRef(arena, LiteralStringRef("/prefix/c"));
StringRef fullKeyD = StringRef(arena, LiteralStringRef("/prefix/d"));
Key prefix = "/prefix/"_sr;
StringRef fullKeyA = StringRef(arena, "/prefix/a"_sr);
StringRef fullKeyB = StringRef(arena, "/prefix/b"_sr);
StringRef fullKeyC = StringRef(arena, "/prefix/c"_sr);
StringRef fullKeyD = StringRef(arena, "/prefix/d"_sr);
StringRef keyA = StringRef(arena, LiteralStringRef("a"));
StringRef keyB = StringRef(arena, LiteralStringRef("b"));
StringRef keyC = StringRef(arena, LiteralStringRef("c"));
StringRef keyD = StringRef(arena, LiteralStringRef("d"));
StringRef keyE = StringRef(arena, LiteralStringRef("e"));
StringRef keyAB = StringRef(arena, LiteralStringRef("ab"));
StringRef keyAC = StringRef(arena, LiteralStringRef("ac"));
StringRef keyCD = StringRef(arena, LiteralStringRef("cd"));
StringRef keyA = StringRef(arena, "a"_sr);
StringRef keyB = StringRef(arena, "b"_sr);
StringRef keyC = StringRef(arena, "c"_sr);
StringRef keyD = StringRef(arena, "d"_sr);
StringRef keyE = StringRef(arena, "e"_sr);
StringRef keyAB = StringRef(arena, "ab"_sr);
StringRef keyAC = StringRef(arena, "ac"_sr);
StringRef keyCD = StringRef(arena, "cd"_sr);
// Fake getRange() call.
RangeResult kv;

View File

@ -1156,10 +1156,8 @@ struct AutoQuorumChange final : IQuorumChange {
}
ACTOR static Future<int> getRedundancy(AutoQuorumChange* self, Transaction* tr) {
state Future<Optional<Value>> fStorageReplicas =
tr->get(LiteralStringRef("storage_replicas").withPrefix(configKeysPrefix));
state Future<Optional<Value>> fLogReplicas =
tr->get(LiteralStringRef("log_replicas").withPrefix(configKeysPrefix));
state Future<Optional<Value>> fStorageReplicas = tr->get("storage_replicas"_sr.withPrefix(configKeysPrefix));
state Future<Optional<Value>> fLogReplicas = tr->get("log_replicas"_sr.withPrefix(configKeysPrefix));
wait(success(fStorageReplicas) && success(fLogReplicas));
int redundancy = std::min(atoi(fStorageReplicas.get().get().toString().c_str()),
atoi(fLogReplicas.get().get().toString().c_str()));
@ -1321,10 +1319,7 @@ struct AutoQuorumChange final : IQuorumChange {
std::map<StringRef, std::map<StringRef, int>> currentCounts;
std::map<StringRef, int> hardLimits;
std::vector<StringRef> fields({ LiteralStringRef("dcid"),
LiteralStringRef("data_hall"),
LiteralStringRef("zoneid"),
LiteralStringRef("machineid") });
std::vector<StringRef> fields({ "dcid"_sr, "data_hall"_sr, "zoneid"_sr, "machineid"_sr });
for (auto field = fields.begin(); field != fields.end(); field++) {
if (field->toString() == "zoneid") {
@ -1350,7 +1345,7 @@ struct AutoQuorumChange final : IQuorumChange {
if (maxCounts[*field] == 0) {
maxCounts[*field] = 1;
}
auto value = worker->locality.get(*field).orDefault(LiteralStringRef(""));
auto value = worker->locality.get(*field).orDefault(""_sr);
auto currentCount = currentCounts[*field][value];
if (currentCount >= maxCounts[*field]) {
valid = false;
@ -1359,7 +1354,7 @@ struct AutoQuorumChange final : IQuorumChange {
}
if (valid) {
for (auto field = fields.begin(); field != fields.end(); field++) {
auto value = worker->locality.get(*field).orDefault(LiteralStringRef(""));
auto value = worker->locality.get(*field).orDefault(""_sr);
currentCounts[*field][value] += 1;
}
chosen.push_back(worker->address);
@ -1541,8 +1536,7 @@ ACTOR Future<Void> includeServers(Database cx, std::vector<AddressExclusion> ser
// This is why we now make two clears: first only of the ip
// address, the second will delete all ports.
if (s.isWholeMachine())
ryw.clear(KeyRangeRef(addr.withSuffix(LiteralStringRef(":")),
addr.withSuffix(LiteralStringRef(";"))));
ryw.clear(KeyRangeRef(addr.withSuffix(":"_sr), addr.withSuffix(";"_sr)));
}
}
TraceEvent("IncludeServersCommit").detail("Servers", describe(servers)).detail("Failed", failed);
@ -2122,9 +2116,7 @@ ACTOR Future<Void> lockDatabase(Transaction* tr, UID id) {
}
tr->atomicOp(databaseLockedKey,
BinaryWriter::toValue(id, Unversioned())
.withPrefix(LiteralStringRef("0123456789"))
.withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
BinaryWriter::toValue(id, Unversioned()).withPrefix("0123456789"_sr).withSuffix("\x00\x00\x00\x00"_sr),
MutationRef::SetVersionstampedValue);
tr->addWriteConflictRange(normalKeys);
return Void();
@ -2145,9 +2137,7 @@ ACTOR Future<Void> lockDatabase(Reference<ReadYourWritesTransaction> tr, UID id)
}
tr->atomicOp(databaseLockedKey,
BinaryWriter::toValue(id, Unversioned())
.withPrefix(LiteralStringRef("0123456789"))
.withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
BinaryWriter::toValue(id, Unversioned()).withPrefix("0123456789"_sr).withSuffix("\x00\x00\x00\x00"_sr),
MutationRef::SetVersionstampedValue);
tr->addWriteConflictRange(normalKeys);
return Void();
@ -2617,11 +2607,11 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
auto dataHall = dataCenter + std::to_string(i / 2 % 2);
auto rack = dataHall + std::to_string(i % 2);
auto machineId = rack + std::to_string(i);
data.locality.set(LiteralStringRef("dcid"), StringRef(dataCenter));
data.locality.set(LiteralStringRef("data_hall"), StringRef(dataHall));
data.locality.set(LiteralStringRef("rack"), StringRef(rack));
data.locality.set(LiteralStringRef("zoneid"), StringRef(rack));
data.locality.set(LiteralStringRef("machineid"), StringRef(machineId));
data.locality.set("dcid"_sr, StringRef(dataCenter));
data.locality.set("data_hall"_sr, StringRef(dataHall));
data.locality.set("rack"_sr, StringRef(rack));
data.locality.set("zoneid"_sr, StringRef(rack));
data.locality.set("machineid"_sr, StringRef(machineId));
data.address.ip = IPAddress(i);
if (g_network->isSimulated()) {
@ -2647,10 +2637,7 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
std::map<StringRef, std::set<StringRef>> chosenValues;
ASSERT(chosen.size() == 5);
std::vector<StringRef> fields({ LiteralStringRef("dcid"),
LiteralStringRef("data_hall"),
LiteralStringRef("zoneid"),
LiteralStringRef("machineid") });
std::vector<StringRef> fields({ "dcid"_sr, "data_hall"_sr, "zoneid"_sr, "machineid"_sr });
for (auto worker = chosen.begin(); worker != chosen.end(); worker++) {
ASSERT(worker->ip.toV4() < workers.size());
LocalityData data = workers[worker->ip.toV4()].locality;
@ -2659,10 +2646,10 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
}
}
ASSERT(chosenValues[LiteralStringRef("dcid")].size() == 2);
ASSERT(chosenValues[LiteralStringRef("data_hall")].size() == 4);
ASSERT(chosenValues[LiteralStringRef("zoneid")].size() == 5);
ASSERT(chosenValues[LiteralStringRef("machineid")].size() == 5);
ASSERT(chosenValues["dcid"_sr].size() == 2);
ASSERT(chosenValues["data_hall"_sr].size() == 4);
ASSERT(chosenValues["zoneid"_sr].size() == 5);
ASSERT(chosenValues["machineid"_sr].size() == 5);
ASSERT(std::find(chosen.begin(), chosen.end(), workers[noAssignIndex].address) != chosen.end());
return Void();
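
The checkLocality test above exercises AutoQuorumChange's coordinator selection, which greedily spreads chosen workers across locality fields subject to per-field caps. A hedged sketch of that filter, with illustrative types in place of worker addresses and LocalityData:

// Hedged sketch of the greedy locality-spread filter: accept a candidate
// only while every locality field's value count stays under that field's
// cap. Types and names are illustrative.
#include <map>
#include <string>
#include <vector>

using Locality = std::map<std::string, std::string>; // field -> value

std::vector<int> chooseSpread(const std::vector<Locality>& workers,
                              const std::map<std::string, int>& maxCounts,
                              std::size_t needed) {
    std::map<std::string, std::map<std::string, int>> currentCounts;
    std::vector<int> chosen;
    for (std::size_t i = 0; i < workers.size() && chosen.size() < needed; ++i) {
        bool valid = true;
        for (const auto& [field, cap] : maxCounts) {
            auto it = workers[i].find(field);
            const std::string value = it == workers[i].end() ? "" : it->second;
            if (currentCounts[field][value] >= cap) {
                valid = false;
                break;
            }
        }
        if (!valid)
            continue;
        for (const auto& [field, cap] : maxCounts) {
            auto it = workers[i].find(field);
            currentCounts[field][it == workers[i].end() ? "" : it->second] += 1;
        }
        chosen.push_back(static_cast<int>(i)); // mirrors chosen.push_back(worker->address)
    }
    return chosen;
}
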

View File

@ -248,7 +248,7 @@ TEST_CASE("/fdbclient/MonitorLeader/ConnectionString/hostname") {
hostnames.push_back(Hostname::parse(hn1 + ":" + port1));
hostnames.push_back(Hostname::parse(hn2 + ":" + port2));
ClusterConnectionString cs(hostnames, LiteralStringRef("TestCluster:0"));
ClusterConnectionString cs(hostnames, "TestCluster:0"_sr);
ASSERT(cs.hostnames.size() == 2);
ASSERT(cs.coords.size() == 0);
ASSERT(cs.toString() == connectionString);
@ -259,7 +259,7 @@ TEST_CASE("/fdbclient/MonitorLeader/ConnectionString/hostname") {
hostnames.push_back(Hostname::parse(hn1 + ":" + port1));
hostnames.push_back(Hostname::parse(hn1 + ":" + port1));
try {
ClusterConnectionString cs(hostnames, LiteralStringRef("TestCluster:0"));
ClusterConnectionString cs(hostnames, "TestCluster:0"_sr);
} catch (Error& e) {
ASSERT(e.code() == error_code_connection_string_invalid);
}
@ -367,7 +367,7 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/fuzz") {
auto c = connectionString.begin();
while (c != connectionString.end()) {
if (deterministicRandom()->random01() < 0.1) // Add whitespace character
output += deterministicRandom()->randomChoice(LiteralStringRef(" \t\n\r"));
output += deterministicRandom()->randomChoice(" \t\n\r"_sr);
if (deterministicRandom()->random01() < 0.5) { // Add one of the input characters
output += *c;
++c;
@ -378,7 +378,7 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/fuzz") {
for (int i = 0; i < charCount; i++) {
output += deterministicRandom()->randomChoice(LiteralStringRef("asdfzxcv123345:!@#$#$&()<\"\' \t"));
}
output += deterministicRandom()->randomChoice(LiteralStringRef("\n\r"));
output += deterministicRandom()->randomChoice("\n\r"_sr);
}
}
@ -896,7 +896,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
info.intermediateConnRecord = connRecord;
return info;
} else {
req.issues.push_back_deep(req.issues.arena(), LiteralStringRef("incorrect_cluster_file_contents"));
req.issues.push_back_deep(req.issues.arena(), "incorrect_cluster_file_contents"_sr);
std::string connectionString = connRecord->getConnectionString().toString();
if (!incorrectTime.present()) {
incorrectTime = now();

View File

@ -698,8 +698,10 @@ ThreadFuture<Version> DLDatabase::verifyBlobRange(const KeyRangeRef& keyRange, O
return unsupported_operation();
}
Version readVersion = version.present() ? version.get() : latestVersion;
FdbCApi::FDBFuture* f = api->databaseVerifyBlobRange(
db, keyRange.begin.begin(), keyRange.begin.size(), keyRange.end.begin(), keyRange.end.size(), version);
db, keyRange.begin.begin(), keyRange.begin.size(), keyRange.end.begin(), keyRange.end.size(), readVersion);
return toThreadFuture<Version>(api, f, [](FdbCApi::FDBFuture* f, FdbCApi* api) {
Version version = invalidVersion;
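
This fix converts the latestVersion sentinel coming through the C API into the internal Optional<Version> convention, where an absent optional means read at the latest version. A hedged sketch of the conversion; the concrete sentinel values are assumptions based on FDB convention:

// Hedged sketch of the sentinel <-> Optional conversion applied above.
// The sentinel values below are assumptions, not quoted from the source.
#include <cstdint>
#include <optional>

using Version = int64_t;
constexpr Version invalidVersion = -1; // assumed
constexpr Version latestVersion = -2;  // assumed sentinel for "no pinned version"

std::optional<Version> toOptionalVersion(Version v) {
    if (v == latestVersion)
        return std::nullopt; // let the cluster pick the latest version
    return v;
}

Version toSentinelVersion(const std::optional<Version>& v) {
    return v.has_value() ? *v : latestVersion;
}
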
@ -2246,7 +2248,7 @@ void validateOption(Optional<StringRef> value, bool canBePresent, bool canBeAbse
void MultiVersionApi::disableMultiVersionClientApi() {
MutexHolder holder(lock);
if (networkStartSetup || localClientDisabled) {
if (networkStartSetup || localClientDisabled || disableBypass) {
throw invalid_option();
}
@ -2453,6 +2455,13 @@ void MultiVersionApi::setNetworkOptionInternal(FDBNetworkOptions::Option option,
externalClient = true;
bypassMultiClientApi = true;
forwardOption = true;
} else if (option == FDBNetworkOptions::DISABLE_CLIENT_BYPASS) {
MutexHolder holder(lock);
ASSERT(!networkStartSetup);
if (bypassMultiClientApi) {
throw invalid_option();
}
disableBypass = true;
} else if (option == FDBNetworkOptions::CLIENT_THREADS_PER_VERSION) {
MutexHolder holder(lock);
validateOption(value, true, false, false);
@ -2551,7 +2560,7 @@ void MultiVersionApi::setupNetwork() {
networkStartSetup = true;
if (externalClients.empty()) {
if (externalClients.empty() && !disableBypass) {
bypassMultiClientApi = true; // SOMEDAY: we won't be able to set this option once it becomes possible to add
// clients after setupNetwork is called
}
@ -2932,8 +2941,8 @@ void MultiVersionApi::loadEnvironmentVariableNetworkOptions() {
MultiVersionApi::MultiVersionApi()
: callbackOnMainThread(true), localClientDisabled(false), networkStartSetup(false), networkSetup(false),
bypassMultiClientApi(false), externalClient(false), apiVersion(0), threadCount(0), tmpDir("/tmp"),
traceShareBaseNameAmongThreads(false), envOptionsLoaded(false) {}
disableBypass(false), bypassMultiClientApi(false), externalClient(false), apiVersion(0), threadCount(0),
tmpDir("/tmp"), traceShareBaseNameAmongThreads(false), envOptionsLoaded(false) {}
MultiVersionApi* MultiVersionApi::api = new MultiVersionApi();
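
The DISABLE_CLIENT_BYPASS plumbing added above enforces a strict ordering: the option must arrive before setupNetwork(), it conflicts with options that already forced the bypass, and setupNetwork() only takes the single-client fast path when no external clients are configured and the bypass was not disabled. A hedged sketch of those rules (the flag names mirror the diff; the class itself is illustrative):

// Hedged sketch of the option-ordering rules; simplified relative to the
// real code, which also tracks a localClientDisabled flag.
#include <stdexcept>

class NetworkOptionState {
    bool networkStartSetup = false;
    bool bypassMultiClientApi = false;
    bool disableBypass = false;

public:
    void setDisableClientBypass() {
        if (networkStartSetup)
            throw std::logic_error("too late: network already set up");
        if (bypassMultiClientApi)
            throw std::logic_error("invalid_option: bypass already forced");
        disableBypass = true;
    }

    void disableMultiVersionClientApi() {
        // mirrors the extra `|| disableBypass` guard in the diff
        if (networkStartSetup || disableBypass)
            throw std::logic_error("invalid_option");
        // ... would mark the local client disabled here
    }

    void setupNetwork(bool haveExternalClients) {
        networkStartSetup = true;
        if (!haveExternalClients && !disableBypass)
            bypassMultiClientApi = true; // single-client fast path
    }
};
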

Some files were not shown because too many files have changed in this diff