Merge branch 'main' of github.com:apple/foundationdb into jfu-mako-tenant-support

This commit is contained in: commit c6ac9cf2a5

@@ -1,6 +1,6 @@
<img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">

FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using API language bindings.

@@ -126,7 +126,6 @@ You should create a second build-directory which you will use for building and d

mkdir .build && cd .build
cmake -G Ninja \
      -DUSE_CCACHE=on \
      -DDISABLE_TLS=off \
      -DUSE_DTRACE=off \
      ..
ninja -j 10

@@ -171,6 +171,13 @@ futures must apply the following rules to the result:

the language binding. Make sure the API returns without error. Finally
push the string "GOT_ESTIMATED_RANGE_SIZE" onto the stack.

+#### GET_RANGE_SPLIT_POINTS
+
+Pops the top three items off of the stack as BEGIN_KEY, END_KEY and
+CHUNK_SIZE. Then call the `getRangeSplitPoints` API of the language
+binding. Make sure the API returns without error. Finally push the string
+"GOT_RANGE_SPLIT_POINTS" onto the stack.

#### GET_KEY (_SNAPSHOT, _DATABASE)

Pops the top four items off of the stack as KEY, OR_EQUAL, OFFSET, PREFIX

@@ -38,6 +38,14 @@ The tenant API introduces some new operations:

Unsets the active tenant.

+#### TENANT_LIST
+
+Pops the top 3 items off of the stack as BEGIN, END, & LIMIT.
+Performs a range read of the tenant management keyspace in a language-appropriate
+way using these parameters. The resulting range of n tenant names is
+packed into a tuple as [t1,t2,t3,...,tn], and this single packed value
+is pushed onto the stack.
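
As an illustration only, a hedged C-level sketch of such a range read. It assumes the tenant management keyspace of this era is exposed under the special-key prefix `\xff\xff/management/tenant_map/`, and it leaves the tuple packing and stack push (which are binding-specific) as comments:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <stdio.h>
#include <string.h>

/* Reads up to `limit` tenant entries in [begin, end). The returned keys carry
 * the special-key prefix, which the binding strips to recover tenant names
 * before packing them into one tuple and pushing that value onto the stack. */
void tenantList(FDBTransaction* tr, const char* begin, const char* end, int limit) {
	const char* prefix = "\xff\xff/management/tenant_map/";
	char b[256], e[256];
	snprintf(b, sizeof(b), "%s%s", prefix, begin);
	snprintf(e, sizeof(e), "%s%s", prefix, end);
	FDBFuture* f = fdb_transaction_get_range(
	    tr,
	    FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)b, (int)strlen(b)),
	    FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)e, (int)strlen(e)),
	    limit, /* target_bytes */ 0, FDB_STREAMING_MODE_WANT_ALL,
	    /* iteration */ 1, /* snapshot */ 0, /* reverse */ 0);
	if (fdb_future_block_until_ready(f) == 0) {
		const FDBKeyValue* kvs = NULL;
		int count = 0;
		fdb_bool_t more = 0;
		if (fdb_future_get_keyvalue_array(f, &kvs, &count, &more) == 0) {
			size_t plen = strlen(prefix);
			for (int i = 0; i < count; i++) {
				/* tenant name: kvs[i].key + plen, length kvs[i].key_length - plen */
				printf("tenant: %.*s\n", (int)(kvs[i].key_length - plen), kvs[i].key + plen);
			}
		}
	}
	fdb_future_destroy(f);
}
```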

Updates to Existing Instructions
--------------------------------

@@ -165,7 +165,7 @@ class ApiTest(Test):

        write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
        txn_sizes = ['GET_APPROXIMATE_SIZE']
        storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS']
-       tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE']
+       tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE', 'TENANT_LIST']

        op_choices += reads
        op_choices += mutations

@@ -600,6 +600,13 @@ class ApiTest(Test):
                instructions.append(op)
            elif op == 'TENANT_CLEAR_ACTIVE':
                instructions.append(op)
+           elif op == 'TENANT_LIST':
+               self.ensure_string(instructions, 2)
+               instructions.push_args(self.random.random_int())
+               test_util.to_front(instructions, 2)
+               test_util.to_front(instructions, 2)
+               instructions.append(op)
+               self.add_strings(1)
            else:
                assert False, 'Unknown operation: ' + op

@@ -301,12 +301,37 @@ endif()

          @LOG_DIR@
  )

+ add_fdbclient_test(
+   NAME fdb_c_api_tests_with_tls
+   DISABLE_LOG_DUMP
+   TLS_ENABLED
+   COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
+           --cluster-file
+           @CLUSTER_FILE@
+           --tester-binary
+           $<TARGET_FILE:fdb_c_api_tester>
+           --external-client-library
+           ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
+           --test-dir
+           ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
+           --tmp-dir
+           @TMP_DIR@
+           --log-dir
+           @LOG_DIR@
+           --tls-cert-file
+           @CLIENT_CERT_FILE@
+           --tls-key-file
+           @CLIENT_KEY_FILE@
+           --tls-ca-file
+           @SERVER_CA_FILE@
+ )

  if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT USE_SANITIZER)
    add_test(NAME fdb_c_upgrade_single_threaded_630api
      COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
              --build-dir ${CMAKE_BINARY_DIR}
              --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
-             --upgrade-path "6.3.23" "7.0.0" "7.2.0"
+             --upgrade-path "6.3.23" "7.0.0" "7.1.5" "7.2.0"
              --process-number 1
    )

@@ -314,7 +339,7 @@ endif()
      COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
              --build-dir ${CMAKE_BINARY_DIR}
              --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
-             --upgrade-path "7.0.0" "7.2.0"
+             --upgrade-path "7.0.0" "7.1.5" "7.2.0"
              --process-number 1
    )

@@ -322,7 +347,7 @@ endif()
      COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
              --build-dir ${CMAKE_BINARY_DIR}
              --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
-             --upgrade-path "6.3.23" "7.0.0" "7.2.0"
+             --upgrade-path "6.3.23" "7.0.0" "7.1.5" "7.2.0" "7.1.5"
              --process-number 3
    )

@@ -330,9 +355,38 @@ endif()
      COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
              --build-dir ${CMAKE_BINARY_DIR}
              --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
-             --upgrade-path "7.0.0" "7.2.0"
+             --upgrade-path "7.0.0" "7.1.5" "7.2.0" "7.1.5"
              --process-number 3
    )

+   add_test(NAME fdb_c_upgrade_multi_threaded_710api
+     COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
+             --build-dir ${CMAKE_BINARY_DIR}
+             --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
+             --upgrade-path "7.1.5" "7.2.0" "7.1.5"
+             --process-number 3
+   )
+
+   add_test(NAME fdb_c_cluster_wiggle
+     COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
+             --build-dir ${CMAKE_BINARY_DIR}
+             --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
+             --upgrade-path "7.2.0" "wiggle"
+             --disable-log-dump
+             --process-number 3
+             --redundancy double
+   )
+
+   add_test(NAME fdb_c_wiggle_and_upgrade
+     COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
+             --build-dir ${CMAKE_BINARY_DIR}
+             --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
+             --upgrade-path "7.0.0" "wiggle" "7.2.0"
+             --disable-log-dump
+             --process-number 3
+             --redundancy double
+   )
+
  endif()

endif()

@@ -655,6 +655,7 @@ extern "C" DLLEXPORT FDBFuture* fdb_transaction_get_mapped_range(FDBTransaction*
                                                                  int target_bytes,
                                                                  FDBStreamingMode mode,
                                                                  int iteration,
+                                                                 int matchIndex,
                                                                  fdb_bool_t snapshot,
                                                                  fdb_bool_t reverse) {
	FDBFuture* r = validate_and_update_parameters(limit, target_bytes, mode, iteration, reverse);

@@ -667,6 +668,7 @@ extern "C" DLLEXPORT FDBFuture* fdb_transaction_get_mapped_range(FDBTransaction*
	                KeySelectorRef(KeyRef(end_key_name, end_key_name_length), end_or_equal, end_offset),
	                StringRef(mapper_name, mapper_name_length),
	                GetRangeLimits(limit, target_bytes),
+	                matchIndex,
	                snapshot,
	                reverse)
	                .extractPtr());

@@ -160,6 +160,7 @@ typedef struct mappedkeyvalue {
	 * take the shortcut. */
	FDBGetRangeReqAndResult getRange;
	unsigned char buffer[32];
+	fdb_bool_t boundaryAndExist;
} FDBMappedKeyValue;

#pragma pack(push, 4)

@@ -384,6 +385,7 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_mapped_range(FDBTran
                                                                          int target_bytes,
                                                                          FDBStreamingMode mode,
                                                                          int iteration,
+                                                                         int matchIndex,
                                                                          fdb_bool_t snapshot,
                                                                          fdb_bool_t reverse);
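
The only signature change is the new `matchIndex` parameter, which selects how much of the primary (index) key is materialized per result. A minimal call sketch; the MATCH_INDEX_* values (0–3) are taken from the Java constants added later in this same commit:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

/* 0 = MATCH_INDEX_ALL, 1 = NONE, 2 = MATCHED_ONLY, 3 = UNMATCHED_ONLY */
FDBFuture* mappedRangeAll(FDBTransaction* tr,
                          const uint8_t* begin, int beginLen,
                          const uint8_t* end, int endLen,
                          const uint8_t* mapper, int mapperLen) {
	/* One mapped-range request: the mapper tuple turns each primary result
	 * into a secondary GetRange, and matchIndex picks whether the primary
	 * keys come back alongside those secondary results. */
	return fdb_transaction_get_mapped_range(tr,
	                                        FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(begin, beginLen),
	                                        FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(end, endLen),
	                                        mapper, mapperLen,
	                                        /* limit */ 0, /* target_bytes */ 0,
	                                        FDB_STREAMING_MODE_WANT_ALL, /* iteration */ 0,
	                                        /* matchIndex */ 0, /* snapshot */ 0, /* reverse */ 0);
}
```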
@@ -53,9 +53,8 @@ private:
	    [this, begin, end, results, tooOld](auto ctx) {
		    ctx->tx()->setOption(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE);
		    KeyValuesResult res = ctx->tx()->readBlobGranules(begin, end, ctx->getBGBasePath());
-		    bool more;
+		    bool more = false;
		    (*results) = res.getKeyValues(&more);
-		    ASSERT(!more);
		    if (res.getError() == error_code_blob_granule_transaction_too_old) {
			    info("BlobGranuleCorrectness::randomReadOp bg too old\n");
			    ASSERT(!seenReadSuccess);

@@ -64,6 +63,7 @@ private:
	    } else if (res.getError() != error_code_success) {
		    ctx->onError(res.getError());
	    } else {
+		    ASSERT(!more);
		    if (!seenReadSuccess) {
			    info("BlobGranuleCorrectness::randomReadOp first success\n");
		    }

@@ -48,9 +48,13 @@ public:
	int numClientThreads;
	int numDatabases;
	int numClients;
+	int statsIntervalMs = 0;
	std::vector<std::pair<std::string, std::string>> knobs;
	TestSpec testSpec;
	std::string bgBasePath;
+	std::string tlsCertFile;
+	std::string tlsKeyFile;
+	std::string tlsCaFile;
};

} // namespace FdbApiTester

@@ -21,6 +21,7 @@
#include "TesterScheduler.h"
#include "TesterUtil.h"

+#include <boost/asio/detail/chrono.hpp>
#include <memory>
#include <thread>
#include <boost/asio.hpp>

@@ -31,6 +32,15 @@ namespace FdbApiTester {

const TTaskFct NO_OP_TASK = []() {};

+class AsioTimer : public ITimer {
+public:
+	AsioTimer(io_context& io_ctx, chrono::steady_clock::duration time) : impl(io_ctx, time) {}
+
+	void cancel() override { impl.cancel(); }
+
+	boost::asio::steady_timer impl;
+};
+
class AsioScheduler : public IScheduler {
public:
	AsioScheduler(int numThreads) : numThreads(numThreads) {}

@@ -44,6 +54,16 @@ public:

	void schedule(TTaskFct task) override { post(io_ctx, task); }

+	std::unique_ptr<ITimer> scheduleWithDelay(int delayMs, TTaskFct task) override {
+		auto timer = std::make_unique<AsioTimer>(io_ctx, boost::asio::chrono::milliseconds(delayMs));
+		timer->impl.async_wait([task](const boost::system::error_code& e) {
+			if (!e) {
+				task();
+			}
+		});
+		return timer;
+	}
+
	void stop() override { work = any_io_executor(); }

	void join() override {

@@ -32,6 +32,16 @@ using TTaskFct = std::function<void(void)>;

extern const TTaskFct NO_OP_TASK;

+/**
+ * Handle to a scheduled timer
+ */
+class ITimer {
+public:
+	virtual ~ITimer() {}
+
+	virtual void cancel() = 0;
+};
+
/**
 * Scheduler for asynchronous execution of tasks on a pool of threads
 */

@@ -45,6 +55,9 @@ public:
	// Schedule a task for asynchronous execution
	virtual void schedule(TTaskFct task) = 0;

+	// Schedule a task to be executed with a given delay
+	virtual std::unique_ptr<ITimer> scheduleWithDelay(int delayMs, TTaskFct task) = 0;
+
	// Gracefully stop the scheduler. Waits for already running tasks to finish
	virtual void stop() = 0;
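
A small usage sketch of the new interface (the caller type here is hypothetical; `AsioScheduler` in the accompanying .cpp is the concrete implementation):

```cpp
#include <cstdio>
#include <memory>

// Holding the returned ITimer keeps the timer cancellable; AsioScheduler's
// wait handler checks the error code, so cancel() prevents the task from firing.
class DelayedPing {
public:
	explicit DelayedPing(IScheduler& scheduler) {
		timer = scheduler.scheduleWithDelay(1000, []() { std::printf("fired after ~1s\n"); });
	}
	void abort() {
		if (timer) timer->cancel();
	}

private:
	std::unique_ptr<ITimer> timer;
};
```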
@@ -33,8 +33,8 @@

namespace FdbApiTester {

-constexpr int LONG_WAIT_TIME_US = 1000000;
-constexpr int LARGE_NUMBER_OF_RETRIES = 5;
+constexpr int LONG_WAIT_TIME_US = 2000000;
+constexpr int LARGE_NUMBER_OF_RETRIES = 10;

void TransactionActorBase::complete(fdb_error_t err) {
	error = err;

@@ -80,7 +80,7 @@ bool WorkloadConfig::getBoolOption(const std::string& name, bool defaultVal) con

WorkloadBase::WorkloadBase(const WorkloadConfig& config)
  : manager(nullptr), tasksScheduled(0), numErrors(0), clientId(config.clientId), numClients(config.numClients),
-    failed(false) {
+    failed(false), numTxCompleted(0) {
	maxErrors = config.getIntOption("maxErrors", 10);
	workloadId = fmt::format("{}{}", config.name, clientId);
}

@@ -89,6 +89,10 @@ void WorkloadBase::init(WorkloadManager* manager) {
	this->manager = manager;
}

+void WorkloadBase::printStats() {
+	info(fmt::format("{} transactions completed", numTxCompleted.load()));
+}
+
void WorkloadBase::schedule(TTaskFct task) {
	if (failed) {
		return;

@@ -106,6 +110,7 @@ void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskF
	}
	tasksScheduled++;
	manager->txExecutor->execute(tx, [this, tx, cont, failOnError]() {
+		numTxCompleted++;
		fdb_error_t err = tx->getErrorCode();
		if (tx->getErrorCode() == error_code_success) {
			cont();

@@ -198,6 +203,9 @@ void WorkloadManager::workloadDone(IWorkload* workload, bool failed) {
	bool done = workloads.empty();
	lock.unlock();
	if (done) {
+		if (statsTimer) {
+			statsTimer->cancel();
+		}
		scheduler->stop();
	}
}

@@ -241,6 +249,24 @@ void WorkloadManager::readControlInput(std::string pipeName) {
	}
}

+void WorkloadManager::schedulePrintStatistics(int timeIntervalMs) {
+	statsTimer = scheduler->scheduleWithDelay(timeIntervalMs, [this, timeIntervalMs]() {
+		for (auto workload : getActiveWorkloads()) {
+			workload->printStats();
+		}
+		this->schedulePrintStatistics(timeIntervalMs);
+	});
+}
+
+std::vector<std::shared_ptr<IWorkload>> WorkloadManager::getActiveWorkloads() {
+	std::unique_lock<std::mutex> lock(mutex);
+	std::vector<std::shared_ptr<IWorkload>> res;
+	for (auto iter : workloads) {
+		res.push_back(iter.second.ref);
+	}
+	return res;
+}
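
getActiveWorkloads copies the workload references while holding the mutex so the subsequent printStats calls can run without it; the generic shape of that snapshot-then-call pattern, for reference:

```cpp
#include <memory>
#include <mutex>
#include <vector>

// Copy the shared pointers under the lock, release it, then invoke the
// potentially slow (or re-entrant) calls on the copies.
template <typename T, typename Fn>
void forEachSnapshot(std::mutex& m, const std::vector<std::shared_ptr<T>>& items, Fn fn) {
	std::vector<std::shared_ptr<T>> snapshot;
	{
		std::lock_guard<std::mutex> lock(m);
		snapshot = items;
	}
	for (auto& item : snapshot) {
		fn(*item);
	}
}
```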

void WorkloadManager::handleStopCommand() {
	std::unique_lock<std::mutex> lock(mutex);
	for (auto& iter : workloads) {

@@ -62,6 +62,9 @@ public:

	// Get workload control interface if supported, nullptr otherwise
	virtual IWorkloadControlIfc* getControlIfc() = 0;

+	// Print workload statistics
+	virtual void printStats() = 0;
};

// Workload configuration

@@ -100,6 +103,8 @@ public:

	std::string getWorkloadId() override { return workloadId; }

+	void printStats() override;
+
protected:
	// Schedule a task as part of the workload
	void schedule(TTaskFct task);

@@ -150,6 +155,9 @@ protected:

	// Workload has failed; no further transactions or continuations will be scheduled by the workload
	std::atomic<bool> failed;

+	// Number of completed transactions
+	std::atomic<int> numTxCompleted;
};

// Workload manager

@@ -175,6 +183,9 @@ public:
		return numWorkloadsFailed > 0;
	}

+	// Schedule statistics to be printed at regular time intervals
+	void schedulePrintStatistics(int timeIntervalMs);
+
private:
	friend WorkloadBase;

@@ -205,6 +216,9 @@ private:
	// Handle CHECK command received from the test controller
	void handleCheckCommand();

+	// A thread-safe operation to return a list of active workloads
+	std::vector<std::shared_ptr<IWorkload>> getActiveWorkloads();
+
	// Transaction executor to be used by the workloads
	ITransactionExecutor* txExecutor;

@@ -225,6 +239,9 @@ private:

	// Output pipe for emitting test control events
	std::ofstream outputPipe;

+	// Timer for printing statistics at regular intervals
+	std::unique_ptr<ITimer> statsTimer;
};

// A workload factory

@@ -53,7 +53,11 @@ enum TesterOptionId {
	OPT_OUTPUT_PIPE,
	OPT_FDB_API_VERSION,
	OPT_TRANSACTION_RETRY_LIMIT,
-	OPT_BLOB_GRANULE_LOCAL_FILE_PATH
+	OPT_BLOB_GRANULE_LOCAL_FILE_PATH,
+	OPT_STATS_INTERVAL,
+	OPT_TLS_CERT_FILE,
+	OPT_TLS_KEY_FILE,
+	OPT_TLS_CA_FILE,
};

CSimpleOpt::SOption TesterOptionDefs[] = //

@@ -77,6 +81,10 @@ CSimpleOpt::SOption TesterOptionDefs[] = //
	{ OPT_FDB_API_VERSION, "--api-version", SO_REQ_SEP },
	{ OPT_TRANSACTION_RETRY_LIMIT, "--transaction-retry-limit", SO_REQ_SEP },
	{ OPT_BLOB_GRANULE_LOCAL_FILE_PATH, "--blob-granule-local-file-path", SO_REQ_SEP },
+	{ OPT_STATS_INTERVAL, "--stats-interval", SO_REQ_SEP },
+	{ OPT_TLS_CERT_FILE, "--tls-cert-file", SO_REQ_SEP },
+	{ OPT_TLS_KEY_FILE, "--tls-key-file", SO_REQ_SEP },
+	{ OPT_TLS_CA_FILE, "--tls-ca-file", SO_REQ_SEP },
	SO_END_OF_OPTIONS };

void printProgramUsage(const char* execName) {

@@ -118,6 +126,14 @@ void printProgramUsage(const char* execName) {
	       "     Path to blob granule files on local filesystem\n"
	       "  -f, --test-file FILE\n"
	       "     Test file to run.\n"
+	       "  --stats-interval MILLISECONDS\n"
+	       "     Time interval in milliseconds for printing workload statistics (default: 0 - disabled).\n"
+	       "  --tls-cert-file FILE\n"
+	       "     Path to file containing client's TLS certificate chain\n"
+	       "  --tls-key-file FILE\n"
+	       "     Path to file containing client's TLS private key\n"
+	       "  --tls-ca-file FILE\n"
+	       "     Path to file containing TLS CA certificate\n"
	       "  -h, --help  Display this help and exit.\n",
	       FDB_API_VERSION);
}

@@ -214,6 +230,18 @@ bool processArg(TesterOptions& options, const CSimpleOpt& args) {
	case OPT_BLOB_GRANULE_LOCAL_FILE_PATH:
		options.bgBasePath = args.OptionArg();
		break;
+	case OPT_STATS_INTERVAL:
+		processIntOption(args.OptionText(), args.OptionArg(), 0, 60000, options.statsIntervalMs);
+		break;
+	case OPT_TLS_CERT_FILE:
+		options.tlsCertFile.assign(args.OptionArg());
+		break;
+	case OPT_TLS_KEY_FILE:
+		options.tlsKeyFile.assign(args.OptionArg());
+		break;
+	case OPT_TLS_CA_FILE:
+		options.tlsCaFile.assign(args.OptionArg());
+		break;
	}
	return true;
}

@@ -292,6 +320,18 @@ void applyNetworkOptions(TesterOptions& options) {
		fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_KNOB,
		                            fmt::format("{}={}", knob.first.c_str(), knob.second.c_str())));
	}

+	if (!options.tlsCertFile.empty()) {
+		fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_TLS_CERT_PATH, options.tlsCertFile));
+	}
+
+	if (!options.tlsKeyFile.empty()) {
+		fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_TLS_KEY_PATH, options.tlsKeyFile));
+	}
+
+	if (!options.tlsCaFile.empty()) {
+		fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_TLS_CA_PATH, options.tlsCaFile));
+	}
}

void randomizeOptions(TesterOptions& options) {

@@ -335,6 +375,9 @@ bool runWorkloads(TesterOptions& options) {
	}

	scheduler->start();
+	if (options.statsIntervalMs) {
+		workloadMgr.schedulePrintStatistics(options.statsIntervalMs);
+	}
	workloadMgr.run();
	return !workloadMgr.failed();
} catch (const std::runtime_error& err) {

@@ -30,6 +30,8 @@ import glob
import random
import string

+TESTER_STATS_INTERVAL_SEC = 5
+

def random_string(len):
    return ''.join(random.choice(string.ascii_letters + string.digits) for i in range(len))

@@ -66,7 +68,8 @@ def dump_client_logs(log_dir):
def run_tester(args, test_file):
    cmd = [args.tester_binary,
           "--cluster-file", args.cluster_file,
-          "--test-file", test_file]
+          "--test-file", test_file,
+          "--stats-interval", str(TESTER_STATS_INTERVAL_SEC*1000)]
    if args.external_client_library is not None:
        cmd += ["--external-client-library", args.external_client_library]
    if args.tmp_dir is not None:

@@ -81,6 +84,15 @@ def run_tester(args, test_file):
        cmd += ["--blob-granule-local-file-path",
                args.blob_granule_local_file_path]

+    if args.tls_ca_file is not None:
+        cmd += ["--tls-ca-file", args.tls_ca_file]
+
+    if args.tls_key_file is not None:
+        cmd += ["--tls-key-file", args.tls_key_file]
+
+    if args.tls_cert_file is not None:
+        cmd += ["--tls-cert-file", args.tls_cert_file]
+
    get_logger().info('\nRunning tester \'%s\'...' % ' '.join(cmd))
    proc = Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
    timed_out = False

@@ -146,6 +158,12 @@ def parse_args(argv):
                        help='The directory for storing temporary files (default: None)')
    parser.add_argument('--blob-granule-local-file-path', type=str, default=None,
                        help='Enable blob granule tests if set, value is path to local blob granule files')
+    parser.add_argument('--tls-ca-file', type=str, default=None,
+                        help='Path to client\'s TLS CA file: i.e. certificate of CA that signed the server certificate')
+    parser.add_argument('--tls-cert-file', type=str, default=None,
+                        help='Path to client\'s TLS certificate file')
+    parser.add_argument('--tls-key-file', type=str, default=None,
+                        help='Path to client\'s TLS private key file')

    return parser.parse_args(argv)

@@ -247,7 +247,7 @@ void ResumableStateForRunWorkload::onTransactionSuccess() {
		const auto commit_latency = watch_commit.diff();
		const auto tx_duration = watch_tx.diff();
		stats.addLatency(OP_COMMIT, commit_latency);
-		stats.addLatency(OP_TRANSACTION, commit_latency);
+		stats.addLatency(OP_TRANSACTION, tx_duration);
		sample_bins[OP_COMMIT].put(commit_latency);
		sample_bins[OP_TRANSACTION].put(tx_duration);
	}

@@ -689,6 +689,19 @@ int workerProcessMain(Arguments const& args, int worker_id, shared_memory::Acces

	selectApiVersion(args.api_version);

+	/* enable distributed tracing */
+	switch (args.distributed_tracer_client) {
+	case 1:
+		err = network::setOptionNothrow(FDB_NET_OPTION_DISTRIBUTED_CLIENT_TRACER, BytesRef(toBytePtr("network_lossy")));
+		break;
+	case 2:
+		err = network::setOptionNothrow(FDB_NET_OPTION_DISTRIBUTED_CLIENT_TRACER, BytesRef(toBytePtr("log_file")));
+		break;
+	}
+	if (err) {
+		logr.error("network::setOption(FDB_NET_OPTION_DISTRIBUTED_CLIENT_TRACER): {}", err.what());
+	}
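
mako's network::setOptionNothrow wrapper bottoms out in the C API's network-option call; the equivalent raw call, as a sketch (network options must be set before the network is started):

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <string.h>

/* Selects the client-side distributed tracer; valid values mirror mako's
 * --distributed_tracer_client choices ("disabled", "network_lossy", "log_file"). */
fdb_error_t selectTracer(const char* tracer) {
	return fdb_network_set_option(FDB_NET_OPTION_DISTRIBUTED_CLIENT_TRACER,
	                              (const uint8_t*)tracer, (int)strlen(tracer));
}
```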

	/* enable flatbuffers if specified */
	if (args.flatbuffers) {
#ifdef FDB_NET_OPTION_USE_FLATBUFFERS

|
|||
args.json_output_path[0] = '\0';
|
||||
args.bg_materialize_files = false;
|
||||
args.bg_file_path[0] = '\0';
|
||||
args.distributed_tracer_client = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -1075,6 +1089,8 @@ void usage() {
	printf("%-24s %s\n",
	       "    --bg_file_path=PATH",
	       "Read blob granule files from the local filesystem at PATH and materialize the results.");
+	printf(
+	    "%-24s %s\n", "    --distributed_tracer_client=CLIENT", "Specify client (disabled, network_lossy, log_file)");
}

/* parse benchmark parameters */

@@ -1127,6 +1143,7 @@ int parseArguments(int argc, char* argv[], Arguments& args) {
		{ "disable_ryw", no_argument, NULL, ARG_DISABLE_RYW },
		{ "json_report", optional_argument, NULL, ARG_JSON_REPORT },
		{ "bg_file_path", required_argument, NULL, ARG_BG_FILE_PATH },
+		{ "distributed_tracer_client", required_argument, NULL, ARG_DISTRIBUTED_TRACER_CLIENT },
		{ NULL, 0, NULL, 0 }
	};
	idx = 0;

@@ -1317,6 +1334,17 @@ int parseArguments(int argc, char* argv[], Arguments& args) {
		case ARG_BG_FILE_PATH:
			args.bg_materialize_files = true;
			strncpy(args.bg_file_path, optarg, std::min(sizeof(args.bg_file_path), strlen(optarg) + 1));
			break; /* not present in the original hunk; without it this case falls through into the tracer parsing */
+		case ARG_DISTRIBUTED_TRACER_CLIENT:
+			if (strcmp(optarg, "disabled") == 0) {
+				args.distributed_tracer_client = 0;
+			} else if (strcmp(optarg, "network_lossy") == 0) {
+				args.distributed_tracer_client = 1;
+			} else if (strcmp(optarg, "log_file") == 0) {
+				args.distributed_tracer_client = 2;
+			} else {
+				args.distributed_tracer_client = -1;
+			}
+			break;
		}
	}

@@ -1384,6 +1412,10 @@ int validateArguments(Arguments const& args) {
			return -1;
		}
	}
+	if (args.distributed_tracer_client < 0) {
+		logr.error("--distributed_tracer_client must specify one of (disabled, network_lossy, log_file)");
+		return -1;
+	}
	return 0;
}

@@ -73,7 +73,8 @@ enum ArgKind {
	ARG_DISABLE_RYW,
	ARG_CLIENT_THREADS_PER_VERSION,
	ARG_JSON_REPORT,
-	ARG_BG_FILE_PATH // if blob granule files are stored locally, mako will read and materialize them if this is set
+	ARG_BG_FILE_PATH, // if blob granule files are stored locally, mako will read and materialize them if this is set
+	ARG_DISTRIBUTED_TRACER_CLIENT
};

constexpr const int OP_COUNT = 0;

@@ -163,6 +164,7 @@ struct Arguments {
	char json_output_path[PATH_MAX];
	bool bg_materialize_files;
	char bg_file_path[PATH_MAX];
+	int distributed_tracer_client;
};

} // namespace mako

@@ -271,6 +271,7 @@ MappedKeyValueArrayFuture Transaction::get_mapped_range(const uint8_t* begin_key
                                                         int target_bytes,
                                                         FDBStreamingMode mode,
                                                         int iteration,
+                                                        int matchIndex,
                                                         fdb_bool_t snapshot,
                                                         fdb_bool_t reverse) {
	return MappedKeyValueArrayFuture(fdb_transaction_get_mapped_range(tr_,

@@ -288,6 +289,7 @@ MappedKeyValueArrayFuture Transaction::get_mapped_range(const uint8_t* begin_key
	                                                                  target_bytes,
	                                                                  mode,
	                                                                  iteration,
+	                                                                  matchIndex,
	                                                                  snapshot,
	                                                                  reverse));
}

@@ -304,6 +304,7 @@ public:
	                      int target_bytes,
	                      FDBStreamingMode mode,
	                      int iteration,
+	                      int matchIndex,
	                      fdb_bool_t snapshot,
	                      fdb_bool_t reverse);

@@ -44,6 +44,8 @@
#include "fdbclient/Tuple.h"

#include "flow/config.h"
+#include "flow/DeterministicRandom.h"
+#include "flow/IRandom.h"

#include "fdb_api.hpp"

@@ -179,8 +181,8 @@ struct GetMappedRangeResult {
	                       std::string, // value
	                       std::string, // begin
	                       std::string, // end
-	                       std::vector<std::pair<std::string, std::string>> // range results
-	                       >>
+	                       std::vector<std::pair<std::string, std::string>>, // range results
+	                       fdb_bool_t>>
	    mkvs;
	// True if values remain in the key range requested.
	bool more;

@@ -259,6 +261,7 @@ GetMappedRangeResult get_mapped_range(fdb::Transaction& tr,
                                      int target_bytes,
                                      FDBStreamingMode mode,
                                      int iteration,
+                                      int matchIndex,
                                      fdb_bool_t snapshot,
                                      fdb_bool_t reverse) {
	fdb::MappedKeyValueArrayFuture f1 = tr.get_mapped_range(begin_key_name,

@@ -275,6 +278,7 @@ GetMappedRangeResult get_mapped_range(fdb::Transaction& tr,
	                                                        target_bytes,
	                                                        mode,
	                                                        iteration,
+	                                                        matchIndex,
	                                                        snapshot,
	                                                        reverse);

@@ -302,6 +306,7 @@ GetMappedRangeResult get_mapped_range(fdb::Transaction& tr,
		auto value = extractString(mkv.value);
		auto begin = extractString(mkv.getRange.begin.key);
		auto end = extractString(mkv.getRange.end.key);
+		bool boundaryAndExist = mkv.boundaryAndExist;
		// std::cout << "key:" << key << " value:" << value << " begin:" << begin << " end:" << end << std::endl;

		std::vector<std::pair<std::string, std::string>> range_results;

@@ -312,7 +317,7 @@ GetMappedRangeResult get_mapped_range(fdb::Transaction& tr,
			range_results.emplace_back(k, v);
			// std::cout << "[" << i << "]" << k << " -> " << v << std::endl;
		}
-		result.mkvs.emplace_back(key, value, begin, end, range_results);
+		result.mkvs.emplace_back(key, value, begin, end, range_results, boundaryAndExist);
	}
	return result;
}

@@ -949,7 +954,11 @@ std::map<std::string, std::string> fillInRecords(int n) {
	return data;
}

-GetMappedRangeResult getMappedIndexEntries(int beginId, int endId, fdb::Transaction& tr, std::string mapper) {
+GetMappedRangeResult getMappedIndexEntries(int beginId,
+                                           int endId,
+                                           fdb::Transaction& tr,
+                                           std::string mapper,
+                                           int matchIndex) {
	std::string indexEntryKeyBegin = indexEntryKey(beginId);
	std::string indexEntryKeyEnd = indexEntryKey(endId);

@@ -963,13 +972,24 @@ GetMappedRangeResult getMappedIndexEntries(int beginId, int endId, fdb::Transact
	                       /* target_bytes */ 0,
	                       /* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
	                       /* iteration */ 0,
+	                       /* matchIndex */ matchIndex,
	                       /* snapshot */ false,
	                       /* reverse */ 0);
}

-GetMappedRangeResult getMappedIndexEntries(int beginId, int endId, fdb::Transaction& tr) {
-	std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).append("{...}"_sr).pack().toString();
-	return getMappedIndexEntries(beginId, endId, tr, mapper);
+GetMappedRangeResult getMappedIndexEntries(int beginId,
+                                           int endId,
+                                           fdb::Transaction& tr,
+                                           int matchIndex,
+                                           bool allMissing) {
+	std::string mapper = Tuple()
+	                         .append(prefix)
+	                         .append(RECORD)
+	                         .append(allMissing ? "{K[2]}"_sr : "{K[3]}"_sr)
+	                         .append("{...}"_sr)
+	                         .pack()
+	                         .toString();
+	return getMappedIndexEntries(beginId, endId, tr, mapper, matchIndex);
}

TEST_CASE("fdb_transaction_get_mapped_range") {

@@ -981,7 +1001,16 @@ TEST_CASE("fdb_transaction_get_mapped_range") {
	while (1) {
		int beginId = 1;
		int endId = 19;
-		auto result = getMappedIndexEntries(beginId, endId, tr);
+		const double r = deterministicRandom()->random01();
+		int matchIndex = MATCH_INDEX_ALL;
+		if (r < 0.25) {
+			matchIndex = MATCH_INDEX_NONE;
+		} else if (r < 0.5) {
+			matchIndex = MATCH_INDEX_MATCHED_ONLY;
+		} else if (r < 0.75) {
+			matchIndex = MATCH_INDEX_UNMATCHED_ONLY;
+		}
+		auto result = getMappedIndexEntries(beginId, endId, tr, matchIndex, false);

		if (result.err) {
			fdb::EmptyFuture f1 = tr.on_error(result.err);

@@ -994,9 +1023,21 @@ TEST_CASE("fdb_transaction_get_mapped_range") {
		CHECK(!result.more);

		int id = beginId;
+		bool boundary;
		for (int i = 0; i < expectSize; i++, id++) {
-			const auto& [key, value, begin, end, range_results] = result.mkvs[i];
-			CHECK(indexEntryKey(id).compare(key) == 0);
+			boundary = i == 0 || i == expectSize - 1;
+			const auto& [key, value, begin, end, range_results, boundaryAndExist] = result.mkvs[i];
+			if (matchIndex == MATCH_INDEX_ALL || i == 0 || i == expectSize - 1) {
+				CHECK(indexEntryKey(id).compare(key) == 0);
+			} else if (matchIndex == MATCH_INDEX_MATCHED_ONLY) {
+				CHECK(indexEntryKey(id).compare(key) == 0);
+			} else if (matchIndex == MATCH_INDEX_UNMATCHED_ONLY) {
+				CHECK(EMPTY.compare(key) == 0);
+			} else {
+				CHECK(EMPTY.compare(key) == 0);
+			}
+			bool empty = range_results.empty();
+			CHECK(boundaryAndExist == (boundary && !empty));
			CHECK(EMPTY.compare(value) == 0);
			CHECK(range_results.size() == SPLIT_SIZE);
			for (int split = 0; split < SPLIT_SIZE; split++) {

@@ -1009,6 +1050,58 @@ TEST_CASE("fdb_transaction_get_mapped_range") {
	}
}

+TEST_CASE("fdb_transaction_get_mapped_range_missing_all_secondary") {
+	const int TOTAL_RECORDS = 20;
+	fillInRecords(TOTAL_RECORDS);
+
+	fdb::Transaction tr(db);
+	// RYW should be enabled.
+	while (1) {
+		int beginId = 1;
+		int endId = 19;
+		const double r = deterministicRandom()->random01();
+		int matchIndex = MATCH_INDEX_ALL;
+		if (r < 0.25) {
+			matchIndex = MATCH_INDEX_NONE;
+		} else if (r < 0.5) {
+			matchIndex = MATCH_INDEX_MATCHED_ONLY;
+		} else if (r < 0.75) {
+			matchIndex = MATCH_INDEX_UNMATCHED_ONLY;
+		}
+		auto result = getMappedIndexEntries(beginId, endId, tr, matchIndex, true);
+
+		if (result.err) {
+			fdb::EmptyFuture f1 = tr.on_error(result.err);
+			fdb_check(wait_future(f1));
+			continue;
+		}
+
+		int expectSize = endId - beginId;
+		CHECK(result.mkvs.size() == expectSize);
+		CHECK(!result.more);
+
+		int id = beginId;
+		bool boundary;
+		for (int i = 0; i < expectSize; i++, id++) {
+			boundary = i == 0 || i == expectSize - 1;
+			const auto& [key, value, begin, end, range_results, boundaryAndExist] = result.mkvs[i];
+			if (matchIndex == MATCH_INDEX_ALL || i == 0 || i == expectSize - 1) {
+				CHECK(indexEntryKey(id).compare(key) == 0);
+			} else if (matchIndex == MATCH_INDEX_MATCHED_ONLY) {
+				CHECK(EMPTY.compare(key) == 0);
+			} else if (matchIndex == MATCH_INDEX_UNMATCHED_ONLY) {
+				CHECK(indexEntryKey(id).compare(key) == 0);
+			} else {
+				CHECK(EMPTY.compare(key) == 0);
+			}
+			bool empty = range_results.empty();
+			CHECK(boundaryAndExist == (boundary && !empty));
+			CHECK(EMPTY.compare(value) == 0);
+		}
+		break;
+	}
+}
+
TEST_CASE("fdb_transaction_get_mapped_range_restricted_to_serializable") {
	std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
	fdb::Transaction tr(db);

@@ -1022,6 +1115,7 @@ TEST_CASE("fdb_transaction_get_mapped_range_restricted_to_serializable") {
	                       /* target_bytes */ 0,
	                       /* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
	                       /* iteration */ 0,
+	                       /* matchIndex */ MATCH_INDEX_ALL,
	                       /* snapshot */ true, // Set snapshot to true
	                       /* reverse */ 0);
	ASSERT(result.err == error_code_unsupported_operation);

@@ -1041,6 +1135,7 @@ TEST_CASE("fdb_transaction_get_mapped_range_restricted_to_ryw_enable") {
	                       /* target_bytes */ 0,
	                       /* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
	                       /* iteration */ 0,
+	                       /* matchIndex */ MATCH_INDEX_ALL,
	                       /* snapshot */ false,
	                       /* reverse */ 0);
	ASSERT(result.err == error_code_unsupported_operation);

@@ -1067,7 +1162,7 @@ TEST_CASE("fdb_transaction_get_mapped_range_fail_on_mapper_not_tuple") {
	};
	assertNotTuple(mapper);
	fdb::Transaction tr(db);
-	auto result = getMappedIndexEntries(1, 3, tr, mapper);
+	auto result = getMappedIndexEntries(1, 3, tr, mapper, MATCH_INDEX_ALL);
	ASSERT(result.err == error_code_mapper_not_tuple);
}

@@ -2021,15 +2116,17 @@ TEST_CASE("fdb_transaction_add_conflict_range") {
TEST_CASE("special-key-space valid transaction ID") {
	auto value = get_value("\xff\xff/tracing/transaction_id", /* snapshot */ false, {});
	REQUIRE(value.has_value());
-	uint64_t transaction_id = std::stoul(value.value());
-	CHECK(transaction_id > 0);
+	UID transaction_id = UID::fromString(value.value());
+	CHECK(transaction_id.first() > 0);
+	CHECK(transaction_id.second() > 0);
}

TEST_CASE("special-key-space custom transaction ID") {
	fdb::Transaction tr(db);
	fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
	while (1) {
-		tr.set("\xff\xff/tracing/transaction_id", std::to_string(ULONG_MAX));
+		UID randomTransactionID = UID(deterministicRandom()->randomUInt64(), deterministicRandom()->randomUInt64());
+		tr.set("\xff\xff/tracing/transaction_id", randomTransactionID.toString());
		fdb::ValueFuture f1 = tr.get("\xff\xff/tracing/transaction_id",
		                             /* snapshot */ false);

@@ -2046,8 +2143,8 @@ TEST_CASE("special-key-space custom transaction ID") {
		fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));

		REQUIRE(out_present);
-		uint64_t transaction_id = std::stoul(std::string(val, vallen));
-		CHECK(transaction_id == ULONG_MAX);
+		UID transaction_id = UID::fromString(val);
+		CHECK(transaction_id == randomTransactionID);
		break;
	}
}

@@ -2074,8 +2171,9 @@ TEST_CASE("special-key-space set transaction ID after write") {
		fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));

		REQUIRE(out_present);
-		uint64_t transaction_id = std::stoul(std::string(val, vallen));
-		CHECK(transaction_id != 0);
+		UID transaction_id = UID::fromString(val);
+		CHECK(transaction_id.first() > 0);
+		CHECK(transaction_id.second() > 0);
		break;
	}
}

@@ -2140,7 +2238,9 @@ TEST_CASE("special-key-space tracing get range") {
		CHECK(out_count == 2);

		CHECK(std::string((char*)out_kv[1].key, out_kv[1].key_length) == tracingBegin + "transaction_id");
-		CHECK(std::stoul(std::string((char*)out_kv[1].value, out_kv[1].value_length)) > 0);
+		UID transaction_id = UID::fromString(std::string((char*)out_kv[1].value));
+		CHECK(transaction_id.first() > 0);
+		CHECK(transaction_id.second() > 0);
		break;
	}
}

@@ -289,7 +289,7 @@ func (o NetworkOptions) SetDistributedClientTracer(param string) error {
//
// Parameter: Client directory for temporary files.
func (o NetworkOptions) SetClientTmpDir(param string) error {
-	return o.setOpt(90, []byte(param))
+	return o.setOpt(91, []byte(param))
}

// Set the size of the client location cache. Raising this value can boost performance in very large databases where clients access data in a near-random pattern. Defaults to 100000.

|
|||
FDBMappedKeyValue kvm = kvms[i];
|
||||
int kvm_count = kvm.getRange.m_size;
|
||||
|
||||
const int totalLengths = 4 + kvm_count * 2;
|
||||
// now it has 5 field, key, value, getRange.begin, getRange.end, boundaryAndExist
|
||||
// this needs to change if FDBMappedKeyValue definition is changed.
|
||||
const int totalFieldFDBMappedKeyValue = 5;
|
||||
|
||||
const int totalLengths = totalFieldFDBMappedKeyValue + kvm_count * 2;
|
||||
|
||||
int totalBytes = kvm.key.key_length + kvm.value.key_length + kvm.getRange.begin.key.key_length +
|
||||
kvm.getRange.end.key.key_length;
|
||||
kvm.getRange.end.key.key_length + sizeof(kvm.boundaryAndExist);
|
||||
for (int i = 0; i < kvm_count; i++) {
|
||||
auto kv = kvm.getRange.data[i];
|
||||
totalBytes += kv.key_length + kv.value_length;
|
||||
|
@ -580,6 +584,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureMappedResults_Future
|
|||
cpBytesAndLength(pByte, pLength, kvm.value);
|
||||
cpBytesAndLength(pByte, pLength, kvm.getRange.begin.key);
|
||||
cpBytesAndLength(pByte, pLength, kvm.getRange.end.key);
|
||||
cpBytesAndLengthInner(pByte, pLength, (uint8_t*)&(kvm.boundaryAndExist), sizeof(kvm.boundaryAndExist));
|
||||
for (int kvm_i = 0; kvm_i < kvm_count; kvm_i++) {
|
||||
auto kv = kvm.getRange.data[kvm_i];
|
||||
cpBytesAndLengthInner(pByte, pLength, kv.key, kv.key_length);
|
||||
|
@ -588,6 +593,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureMappedResults_Future
|
|||
}
|
||||
}
|
||||
// After native arrays are released
|
||||
// call public static method MappedKeyValue::fromBytes()
|
||||
jobject mkv = jenv->CallStaticObjectMethod(
|
||||
mapped_key_value_class, mapped_key_value_from_bytes, (jbyteArray)bytesArray, (jintArray)lengthArray);
|
||||
if (jenv->ExceptionOccurred())
|
||||
|
@ -960,6 +966,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
|
|||
jint targetBytes,
|
||||
jint streamingMode,
|
||||
jint iteration,
|
||||
jint matchIndex,
|
||||
jboolean snapshot,
|
||||
jboolean reverse) {
|
||||
if (!tPtr || !keyBeginBytes || !keyEndBytes || !mapperBytes) {
|
||||
|
@ -1007,6 +1014,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
|
|||
targetBytes,
|
||||
(FDBStreamingMode)streamingMode,
|
||||
iteration,
|
||||
matchIndex,
|
||||
snapshot,
|
||||
reverse);
|
||||
jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
|
||||
|
|
|
@@ -43,8 +43,8 @@ public class CycleMultiClientIntegrationTest {
	public static final MultiClientHelper clientHelper = new MultiClientHelper();

	// more write txn than validate txn, as parent thread waits only for validate txn.
-	private static final int writeTxnCnt = 2000;
-	private static final int validateTxnCnt = 1000;
+	private static final int writeTxnCnt = 200;
+	private static final int validateTxnCnt = 100;
	private static final int threadPerDB = 5;

	private static final int cycleLength = 4;

@@ -192,12 +192,12 @@ class MappedRangeQueryIntegrationTest {

	RangeQueryWithIndex mappedRangeQuery = (int begin, int end, Database db) -> db.run(tr -> {
		try {
-			List<MappedKeyValue> kvs =
-			    tr.getMappedRange(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
-			                      KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER,
-			                      ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
-			        .asList()
-			        .get();
+			List<MappedKeyValue> kvs = tr.getMappedRange(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
+			                                             KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER,
+			                                             ReadTransaction.ROW_LIMIT_UNLIMITED,
+			                                             FDBTransaction.MATCH_INDEX_ALL, false, StreamingMode.WANT_ALL)
+			                               .asList()
+			                               .get();
			Assertions.assertEquals(end - begin, kvs.size());

			if (validate) {

@@ -208,7 +208,11 @@ class MappedRangeQueryIntegrationTest {
					assertByteArrayEquals(indexEntryKey(id), mappedKeyValue.getKey());
					assertByteArrayEquals(EMPTY, mappedKeyValue.getValue());
					assertByteArrayEquals(indexEntryKey(id), mappedKeyValue.getKey());

+					if (id == begin || id == end - 1) {
+						Assertions.assertTrue(mappedKeyValue.getBoundaryAndExist());
+					} else {
+						Assertions.assertFalse(mappedKeyValue.getBoundaryAndExist());
+					}
					byte[] prefix = recordKeyPrefix(id);
					assertByteArrayEquals(prefix, mappedKeyValue.getRangeBegin());
					prefix[prefix.length - 1] = (byte)0x01;

@@ -32,6 +32,12 @@ import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;

class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionConsumer {

+	static public final int MATCH_INDEX_ALL = 0;
+	static public final int MATCH_INDEX_NONE = 1;
+	static public final int MATCH_INDEX_MATCHED_ONLY = 2;
+	static public final int MATCH_INDEX_UNMATCHED_ONLY = 3;
+
	private final Database database;
	private final Executor executor;
	private final TransactionOptions options;

@@ -93,7 +99,8 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC

	@Override
	public AsyncIterable<MappedKeyValue> getMappedRange(KeySelector begin, KeySelector end, byte[] mapper,
-	                                                    int limit, boolean reverse, StreamingMode mode) {
+	                                                    int limit, int matchIndex, boolean reverse,
+	                                                    StreamingMode mode) {

		throw new UnsupportedOperationException("getMappedRange is only supported in serializable");
	}

@@ -346,12 +353,13 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
	}

	@Override
-	public AsyncIterable<MappedKeyValue> getMappedRange(KeySelector begin, KeySelector end, byte[] mapper,
-	                                                    int limit, boolean reverse, StreamingMode mode) {
+	public AsyncIterable<MappedKeyValue> getMappedRange(KeySelector begin, KeySelector end, byte[] mapper, int limit,
+	                                                    int matchIndex, boolean reverse, StreamingMode mode) {
		if (mapper == null) {
			throw new IllegalArgumentException("Mapper must be non-null");
		}
-		return new MappedRangeQuery(FDBTransaction.this, false, begin, end, mapper, limit, reverse, mode, eventKeeper);
+		return new MappedRangeQuery(FDBTransaction.this, false, begin, end, mapper, limit, matchIndex, reverse, mode,
+		                            eventKeeper);
	}

	///////////////////

@@ -456,7 +464,8 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
	protected FutureMappedResults getMappedRange_internal(KeySelector begin, KeySelector end,
	                                                      byte[] mapper, // Nullable
	                                                      int rowLimit, int targetBytes, int streamingMode,
-	                                                      int iteration, boolean isSnapshot, boolean reverse) {
+	                                                      int iteration, boolean isSnapshot, boolean reverse,
+	                                                      int matchIndex) {
		if (eventKeeper != null) {
			eventKeeper.increment(Events.JNI_CALL);
		}

@@ -467,9 +476,9 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
			    begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
			    iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
			return new FutureMappedResults(
-			    Transaction_getMappedRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
-			                               end.getKey(), end.orEqual(), end.getOffset(), mapper, rowLimit,
-			                               targetBytes, streamingMode, iteration, isSnapshot, reverse),
+			    Transaction_getMappedRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(), end.getKey(),
+			                               end.orEqual(), end.getOffset(), mapper, rowLimit, targetBytes, streamingMode,
+			                               iteration, matchIndex, isSnapshot, reverse),
			    FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper);
		} finally {
			pointerReadLock.unlock();

@@ -809,12 +818,11 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
	                                               byte[] keyEnd, boolean orEqualEnd, int offsetEnd,
	                                               int rowLimit, int targetBytes, int streamingMode, int iteration,
	                                               boolean isSnapshot, boolean reverse);
-	private native long Transaction_getMappedRange(long cPtr, byte[] keyBegin, boolean orEqualBegin,
-	                                               int offsetBegin, byte[] keyEnd, boolean orEqualEnd,
-	                                               int offsetEnd,
-	                                               byte[] mapper, // Nonnull
-	                                               int rowLimit, int targetBytes, int streamingMode, int iteration,
-	                                               boolean isSnapshot, boolean reverse);
+	private native long Transaction_getMappedRange(long cPtr, byte[] keyBegin, boolean orEqualBegin, int offsetBegin,
+	                                               byte[] keyEnd, boolean orEqualEnd, int offsetEnd,
+	                                               byte[] mapper, // Nonnull
+	                                               int rowLimit, int targetBytes, int streamingMode, int iteration,
+	                                               int matchIndex, boolean isSnapshot, boolean reverse);
	private native void Transaction_addConflictRange(long cPtr,
	                                                 byte[] keyBegin, byte[] keyEnd, int conflictRangeType);
	private native void Transaction_set(long cPtr, byte[] key, byte[] value);

@@ -41,4 +41,8 @@ public class KeyArrayResult {
			keys.add(key);
		}
	}

+	public List<byte[]> getKeys() {
+		return keys;
+	}
}

@@ -22,6 +22,8 @@ package com.apple.foundationdb;

import com.apple.foundationdb.tuple.ByteArrayUtil;

+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@@ -31,24 +33,35 @@ public class MappedKeyValue extends KeyValue {
	private final byte[] rangeBegin;
	private final byte[] rangeEnd;
	private final List<KeyValue> rangeResult;
+	private final int boundaryAndExist;

-	MappedKeyValue(byte[] key, byte[] value, byte[] rangeBegin, byte[] rangeEnd, List<KeyValue> rangeResult) {
+	// Now it has 5 fields: key, value, getRange.begin, getRange.end, boundaryAndExist.
+	// This needs to change if the FDBMappedKeyValue definition is changed.
+	private static final int TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue = 5;
+
+	public MappedKeyValue(byte[] key, byte[] value, byte[] rangeBegin, byte[] rangeEnd, List<KeyValue> rangeResult,
+	                      int boundaryAndExist) {
		super(key, value);
		this.rangeBegin = rangeBegin;
		this.rangeEnd = rangeEnd;
		this.rangeResult = rangeResult;
+		this.boundaryAndExist = boundaryAndExist;
	}

	public byte[] getRangeBegin() { return rangeBegin; }

	public byte[] getRangeEnd() { return rangeEnd; }

+	public boolean getBoundaryAndExist() { return boundaryAndExist == 0 ? false : true; }
+
	public List<KeyValue> getRangeResult() { return rangeResult; }

	public static MappedKeyValue fromBytes(byte[] bytes, int[] lengths) {
		// Lengths include: key, value, rangeBegin, rangeEnd, count * (underlying key, underlying value)
-		if (lengths.length < 4) {
-			throw new IllegalArgumentException("There needs to be at least 4 lengths to cover the metadata");
+		if (lengths.length < TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue) {
+			throw new IllegalArgumentException("There needs to be at least " +
+			                                   TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue +
+			                                   " lengths to cover the metadata");
		}

		Offset offset = new Offset();

@@ -56,18 +69,20 @@ public class MappedKeyValue extends KeyValue {
		byte[] value = takeBytes(offset, bytes, lengths);
		byte[] rangeBegin = takeBytes(offset, bytes, lengths);
		byte[] rangeEnd = takeBytes(offset, bytes, lengths);
+		byte[] boundaryAndExistBytes = takeBytes(offset, bytes, lengths);
+		int boundaryAndExist = ByteBuffer.wrap(boundaryAndExistBytes).order(ByteOrder.LITTLE_ENDIAN).getInt();

-		if ((lengths.length - 4) % 2 != 0) {
+		if ((lengths.length - TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue) % 2 != 0) {
			throw new IllegalArgumentException("There needs to be an even number of lengths!");
		}
-		int count = (lengths.length - 4) / 2;
+		int count = (lengths.length - TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue) / 2;
		List<KeyValue> rangeResult = new ArrayList<>(count);
		for (int i = 0; i < count; i++) {
			byte[] k = takeBytes(offset, bytes, lengths);
			byte[] v = takeBytes(offset, bytes, lengths);
			rangeResult.add(new KeyValue(k, v));
		}
-		return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult);
+		return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult, boundaryAndExist);
	}
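
The layout fromBytes consumes matches the JNI serializer earlier in this commit: one shared byte array sliced by a lengths array, with five metadata fields first (key, value, rangeBegin, rangeEnd, and the 4-byte little-endian boundaryAndExist flag) followed by the underlying key/value pairs. A C++ sketch of the same slicing walk:

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Mirrors MappedKeyValue.fromBytes(): consume lengths[i] bytes per field.
// fields[0..4] are the metadata; the remainder alternate key, value.
std::vector<std::string> splitFields(const uint8_t* bytes, const std::vector<int>& lengths) {
	std::vector<std::string> fields;
	size_t offset = 0;
	for (int len : lengths) {
		fields.emplace_back(reinterpret_cast<const char*>(bytes) + offset, len);
		offset += len;
	}
	return fields; // fields[4] holds boundaryAndExist as 4 little-endian bytes
}
```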

	static class Offset {

@@ -84,12 +99,36 @@ public class MappedKeyValue extends KeyValue {
		return b;
	}

+	@Override
+	public boolean equals(Object obj) {
+		if (obj == null)
+			return false;
+		if (obj == this)
+			return true;
+		if (!(obj instanceof MappedKeyValue))
+			return false;
+
+		MappedKeyValue rhs = (MappedKeyValue) obj;
+		return Arrays.equals(rangeBegin, rhs.rangeBegin)
+		    && Arrays.equals(rangeEnd, rhs.rangeEnd)
+		    && Objects.equals(rangeResult, rhs.rangeResult)
+		    && boundaryAndExist == rhs.boundaryAndExist;
+	}
+
+	@Override
+	public int hashCode() {
+		int hashForResult = rangeResult == null ? 0 : rangeResult.hashCode();
+		return 17 +
+		    (29 * hashForResult + boundaryAndExist + 37 * Arrays.hashCode(rangeBegin) + Arrays.hashCode(rangeEnd));
+	}
+
	@Override
	public String toString() {
		final StringBuilder sb = new StringBuilder("MappedKeyValue{");
		sb.append("rangeBegin=").append(ByteArrayUtil.printable(rangeBegin));
		sb.append(", rangeEnd=").append(ByteArrayUtil.printable(rangeEnd));
		sb.append(", rangeResult=").append(rangeResult);
+		sb.append(", boundaryAndExist=").append(boundaryAndExist);
		sb.append('}');
		return super.toString() + "->" + sb.toString();
	}

@@ -53,18 +53,21 @@ class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {
	private final byte[] mapper; // Nonnull
	private final boolean snapshot;
	private final int rowLimit;
+	private final int matchIndex;
	private final boolean reverse;
	private final StreamingMode streamingMode;
	private final EventKeeper eventKeeper;

	MappedRangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, byte[] mapper,
-	                 int rowLimit, boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
+	                 int rowLimit, int matchIndex, boolean reverse, StreamingMode streamingMode,
+	                 EventKeeper eventKeeper) {
		this.tr = transaction;
		this.begin = begin;
		this.end = end;
		this.mapper = mapper;
		this.snapshot = isSnapshot;
		this.rowLimit = rowLimit;
+		this.matchIndex = matchIndex;
		this.reverse = reverse;
		this.streamingMode = streamingMode;
		this.eventKeeper = eventKeeper;

@@ -88,14 +91,14 @@ class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {

			FutureMappedResults range =
			    tr.getMappedRange_internal(this.begin, this.end, this.mapper, this.rowLimit, 0,
-			                               StreamingMode.EXACT.code(), 1, this.snapshot, this.reverse);
+			                               StreamingMode.EXACT.code(), 1, this.snapshot, this.reverse, this.matchIndex);
			return range.thenApply(result -> result.get().values).whenComplete((result, e) -> range.close());
		}

		// If the streaming mode is not EXACT, simply collect the results of an
		// iteration into a list
		return AsyncUtil.collect(
-		    new MappedRangeQuery(tr, snapshot, begin, end, mapper, rowLimit, reverse, mode, eventKeeper),
+		    new MappedRangeQuery(tr, snapshot, begin, end, mapper, rowLimit, matchIndex, reverse, mode, eventKeeper),
		    tr.getExecutor());
	}

@@ -106,7 +109,7 @@ class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {
	 */
	@Override
	public AsyncRangeIterator iterator() {
-		return new AsyncRangeIterator(this.rowLimit, this.reverse, this.streamingMode);
+		return new AsyncRangeIterator(this.rowLimit, this.matchIndex, this.reverse, this.streamingMode);
	}

	private class AsyncRangeIterator implements AsyncIterator<MappedKeyValue> {

|
@ -114,6 +117,7 @@ class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {
|
|||
private final boolean rowsLimited;
|
||||
private final boolean reverse;
|
||||
private final StreamingMode streamingMode;
|
||||
private final int matchIndex;
|
||||
|
||||
// There is the chance for parallelism in the two "chunks" for fetched data
|
||||
private MappedRangeResult chunk = null;
|
||||
|
@ -131,12 +135,13 @@ class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {
|
|||
private CompletableFuture<Boolean> nextFuture;
|
||||
private boolean isCancelled = false;
|
||||
|
||||
private AsyncRangeIterator(int rowLimit, boolean reverse, StreamingMode streamingMode) {
|
||||
private AsyncRangeIterator(int rowLimit, int matchIndex, boolean reverse, StreamingMode streamingMode) {
|
||||
this.begin = MappedRangeQuery.this.begin;
|
||||
this.end = MappedRangeQuery.this.end;
|
||||
this.rowsLimited = rowLimit != 0;
|
||||
this.rowsRemaining = rowLimit;
|
||||
this.reverse = reverse;
|
||||
this.matchIndex = matchIndex;
|
||||
this.streamingMode = streamingMode;
|
||||
|
||||
startNextFetch();
|
||||
|
@@ -217,8 +222,9 @@ class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {

			nextFuture = new CompletableFuture<>();
			final long sTime = System.nanoTime();
-			fetchingChunk = tr.getMappedRange_internal(begin, end, mapper, rowsLimited ? rowsRemaining : 0, 0,
-			                                           streamingMode.code(), ++iteration, snapshot, reverse);
+			fetchingChunk =
+			    tr.getMappedRange_internal(begin, end, mapper, rowsLimited ? rowsRemaining : 0, 0, streamingMode.code(),
+			                               ++iteration, snapshot, reverse, matchIndex);

			BiConsumer<MappedRangeResultInfo, Throwable> cons = new FetchComplete(fetchingChunk, nextFuture);
			if (eventKeeper != null) {

@@ -51,6 +51,8 @@ class MappedRangeResultDirectBufferIterator extends DirectBufferIterator impleme
    final byte[] value = getString();
    final byte[] rangeBegin = getString();
    final byte[] rangeEnd = getString();
    final byte[] boundaryAndExistBytes = getString();
    final int boundaryAndExist = ByteBuffer.wrap(boundaryAndExistBytes).getInt();
    final int rangeResultSize = byteBuffer.getInt();
    List<KeyValue> rangeResult = new ArrayList<>();
    for (int i = 0; i < rangeResultSize; i++) {

@@ -59,7 +61,7 @@ class MappedRangeResultDirectBufferIterator extends DirectBufferIterator impleme
        rangeResult.add(new KeyValue(k, v));
    }
    current += 1;
    return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult);
    return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult, boundaryAndExist);
}

private byte[] getString() {
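The `boundaryAndExist` field above arrives as a 4-byte big-endian integer (big-endian is the `ByteBuffer` default order). A minimal Python sketch of the same decode, with a hypothetical `raw` payload standing in for the bytes handed over by the native side:

    import struct

    # Hypothetical 4-byte payload as produced by the native side.
    raw = b'\x00\x00\x00\x03'

    # ByteBuffer.wrap(...).getInt() reads a big-endian 32-bit int;
    # struct's '>i' format is the Python equivalent.
    (boundary_and_exist,) = struct.unpack('>i', raw)
    print(boundary_and_exist)  # 3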
@@ -433,7 +433,9 @@ public interface ReadTransaction extends ReadTransactionContext {
 *
 * @param begin the beginning of the range (inclusive)
 * @param end the end of the range (exclusive)
 * @param mapper TODO
 * @param mapper defines how to map a key-value pair (one of the key-value pairs obtained
 *     from the first range query) to a GetRange (or GetValue) request.
 *     more details: https://github.com/apple/foundationdb/wiki/Everything-about-GetMappedRange
 * @param limit the maximum number of results to return. Limits results to the
 *     <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
 *     should not limit the number of results. If {@code reverse} is {@code true} rows

@@ -458,7 +460,7 @@ public interface ReadTransaction extends ReadTransactionContext {
 * @return a handle to access the results of the asynchronous call
 */
AsyncIterable<MappedKeyValue> getMappedRange(KeySelector begin, KeySelector end, byte[] mapper, int limit,
                                             boolean reverse, StreamingMode mode);
                                             int matchIndex, boolean reverse, StreamingMode mode);

/**
 * Gets an estimate for the number of bytes stored in the given range.
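For readers unfamiliar with mappers: per the wiki page linked above, a mapper is a packed tuple of literals plus placeholders that expands each key-value pair from the primary range read into a secondary GetRange (or GetValue) request. A hedged sketch of building one with the Python tuple layer; the index/record key layout here is invented purely for illustration, and the exact placeholder syntax should be checked against that wiki page:

    import fdb
    fdb.api_version(710)  # assumed API version

    # Suppose index entries look like ("idx", school, student) -> ''
    # and records look like ("rec", student, field) -> value.
    # A mapper of literals plus {K[i]}/{...} placeholders asks the cluster
    # to expand every index hit into a range read of that student's record:
    mapper = fdb.tuple.pack(("rec", "{K[2]}", "{...}"))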
@@ -22,6 +22,7 @@ package com.apple.foundationdb;

import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;

@@ -210,5 +211,109 @@ public class TenantManagement {
    return deleteTenant(db, tenantName.pack());
}

/**
 * Lists all tenants within the specified range. The number of tenants listed can be restricted.
 *
 * @param db The database used to create a transaction for listing the tenants.
 * @param begin The beginning of the range of tenants to list.
 * @param end The end of the range of the tenants to list.
 * @param limit The maximum number of tenants to return from this request.
 * @return an iterator where each item is a KeyValue object where the key is the tenant name
 *     and the value is the unprocessed JSON string containing the tenant's metadata
 */
public static CloseableAsyncIterator<KeyValue> listTenants(Database db, byte[] begin, byte[] end, int limit) {
    return listTenants_internal(db.createTransaction(), begin, end, limit);
}

/**
 * Lists all tenants within the specified range. The number of tenants listed can be restricted.
 * This is a convenience method that generates the begin and end ranges by packing two {@code Tuple}s.
 *
 * @param db The database used to create a transaction for listing the tenants.
 * @param begin The beginning of the range of tenants to list.
 * @param end The end of the range of the tenants to list.
 * @param limit The maximum number of tenants to return from this request.
 * @return an iterator where each item is a KeyValue object where the key is the tenant name
 *     and the value is the unprocessed JSON string containing the tenant's metadata
 */
public static CloseableAsyncIterator<KeyValue> listTenants(Database db, Tuple begin, Tuple end, int limit) {
    return listTenants_internal(db.createTransaction(), begin.pack(), end.pack(), limit);
}

private static CloseableAsyncIterator<KeyValue> listTenants_internal(Transaction tr, byte[] begin, byte[] end,
                                                                     int limit) {
    return new TenantAsyncIterator(tr, begin, end, limit);
}

// Template taken from BoundaryIterator in LocalityUtil.java
static class TenantAsyncIterator implements CloseableAsyncIterator<KeyValue> {
    Transaction tr;
    final byte[] begin;
    final byte[] end;

    final AsyncIterable<KeyValue> firstGet;
    AsyncIterator<KeyValue> iter;
    private boolean closed;

    TenantAsyncIterator(Transaction tr, byte[] begin, byte[] end, int limit) {
        this.tr = tr;

        this.begin = ByteArrayUtil.join(TENANT_MAP_PREFIX, begin);
        this.end = ByteArrayUtil.join(TENANT_MAP_PREFIX, end);

        tr.options().setReadSystemKeys();
        tr.options().setLockAware();

        firstGet = tr.getRange(this.begin, this.end, limit);
        iter = firstGet.iterator();
        closed = false;
    }

    @Override
    public CompletableFuture<Boolean> onHasNext() {
        return iter.onHasNext();
    }

    @Override
    public boolean hasNext() {
        return iter.hasNext();
    }

    @Override
    public KeyValue next() {
        KeyValue kv = iter.next();
        byte[] tenant = Arrays.copyOfRange(kv.getKey(), TENANT_MAP_PREFIX.length, kv.getKey().length);
        byte[] value = kv.getValue();

        KeyValue result = new KeyValue(tenant, value);
        return result;
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException("Tenant lists are read-only");
    }

    @Override
    public void close() {
        TenantAsyncIterator.this.tr.close();
        closed = true;
    }

    @Override
    protected void finalize() throws Throwable {
        try {
            if (FDB.instance().warnOnUnclosed && !closed) {
                System.err.println("CloseableAsyncIterator not closed (listTenants)");
            }
            if (!closed) {
                close();
            }
        } finally {
            super.finalize();
        }
    }
}

private TenantManagement() {}
}
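Both stack testers below treat each listed value as an opaque JSON blob that carries at least `id` and `prefix`. A minimal sketch of consuming one such entry, where `value` is a hypothetical metadata blob of the shape the testers validate:

    import json

    # Hypothetical value returned for one tenant by listTenants.
    value = b'{"id": 42, "prefix": "some-prefix"}'

    metadata = json.loads(value)
    tenant_id = metadata["id"]    # numeric tenant id
    prefix = metadata["prefix"]   # key prefix assigned to the tenant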
@@ -48,6 +48,7 @@ import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;
import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.async.CloseableAsyncIterator;

public class AsyncStackTester {
    static final String DIRECTORY_PREFIX = "DIRECTORY_";

@@ -483,6 +484,26 @@ public class AsyncStackTester {
        inst.push(TenantManagement.deleteTenant(inst.context.db, tenantName));
    }, FDB.DEFAULT_EXECUTOR);
}
else if (op == StackOperation.TENANT_LIST) {
    return inst.popParams(3).thenAcceptAsync(params -> {
        byte[] begin = (byte[])params.get(0);
        byte[] end = (byte[])params.get(1);
        int limit = StackUtils.getInt(params.get(2));
        CloseableAsyncIterator<KeyValue> tenantIter = TenantManagement.listTenants(inst.context.db, begin, end, limit);
        List<byte[]> result = new ArrayList<>();
        try {
            while (tenantIter.hasNext()) {
                KeyValue next = tenantIter.next();
                String metadata = new String(next.getValue());
                assert StackUtils.validTenantMetadata(metadata) : "Invalid Tenant Metadata";
                result.add(next.getKey());
            }
        } finally {
            tenantIter.close();
        }
        inst.push(Tuple.fromItems(result).pack());
    }, FDB.DEFAULT_EXECUTOR);
}
else if (op == StackOperation.TENANT_SET_ACTIVE) {
    return inst.popParam().thenAcceptAsync(param -> {
        byte[] tenantName = (byte[])param;

@@ -493,7 +514,7 @@ public class AsyncStackTester {
    inst.context.setTenant(Optional.empty());
    return AsyncUtil.DONE;
}
else if(op == StackOperation.UNIT_TESTS) {
else if (op == StackOperation.UNIT_TESTS) {
    inst.context.db.options().setLocationCacheSize(100001);
    return inst.context.db.runAsync(tr -> {
        FDB fdb = FDB.instance();

@@ -568,7 +589,7 @@ public class AsyncStackTester {
        throw new RuntimeException("Unit tests failed: " + t.getMessage());
    });
}
else if(op == StackOperation.LOG_STACK) {
else if (op == StackOperation.LOG_STACK) {
    return inst.popParam().thenComposeAsync(prefix -> doLogStack(inst, (byte[])prefix), FDB.DEFAULT_EXECUTOR);
}
@@ -76,6 +76,7 @@ enum StackOperation {
    // Tenants
    TENANT_CREATE,
    TENANT_DELETE,
    TENANT_LIST,
    TENANT_SET_ACTIVE,
    TENANT_CLEAR_ACTIVE,
@@ -429,6 +429,25 @@ public class StackTester {
    byte[] tenantName = (byte[])inst.popParam().join();
    inst.push(TenantManagement.deleteTenant(inst.context.db, tenantName));
}
else if (op == StackOperation.TENANT_LIST) {
    List<Object> params = inst.popParams(3).join();
    byte[] begin = (byte[])params.get(0);
    byte[] end = (byte[])params.get(1);
    int limit = StackUtils.getInt(params.get(2));
    CloseableAsyncIterator<KeyValue> tenantIter = TenantManagement.listTenants(inst.context.db, begin, end, limit);
    List<byte[]> result = new ArrayList<>();
    try {
        while (tenantIter.hasNext()) {
            KeyValue next = tenantIter.next();
            String metadata = new String(next.getValue());
            assert StackUtils.validTenantMetadata(metadata) : "Invalid Tenant Metadata";
            result.add(next.getKey());
        }
    } finally {
        tenantIter.close();
    }
    inst.push(Tuple.fromItems(result).pack());
}
else if (op == StackOperation.TENANT_SET_ACTIVE) {
    byte[] tenantName = (byte[])inst.popParam().join();
    inst.context.setTenant(Optional.of(tenantName));

@@ -436,7 +455,7 @@ public class StackTester {
else if (op == StackOperation.TENANT_CLEAR_ACTIVE) {
    inst.context.setTenant(Optional.empty());
}
else if(op == StackOperation.UNIT_TESTS) {
else if (op == StackOperation.UNIT_TESTS) {
    try {
        inst.context.db.options().setLocationCacheSize(100001);
        inst.context.db.run(tr -> {

@@ -514,7 +533,7 @@ public class StackTester {
        throw new RuntimeException("Unit tests failed: " + e.getMessage());
    }
}
else if(op == StackOperation.LOG_STACK) {
else if (op == StackOperation.LOG_STACK) {
    List<Object> params = inst.popParams(1).join();
    byte[] prefix = (byte[]) params.get(0);
@@ -67,6 +67,13 @@ public class StackUtils {
    return item;
}

// Without a JSON parsing library, we try to validate that the metadata consists
// of a select few properties using simple string comparison
static boolean validTenantMetadata(String metadata) {
    return (metadata.charAt(0) == '{' && metadata.charAt(metadata.length() - 1) == '}' && metadata.contains("id") &&
            metadata.contains("prefix"));
}

////////////////////////
// Utilities for forcing Objects into various types
////////////////////////
@@ -71,6 +71,11 @@ import types
import struct


def remove_prefix(text, prefix):
    if text.startswith(prefix):
        return text[len(prefix):]
    return text


def option_wrap(code):
    def setfunc(self):
        self._parent._set_option(code, None, 0)
@@ -78,6 +78,39 @@ def _delete_tenant_impl(tr, tenant_name, existence_check_marker, force_existence

    del tr[key]


class FDBTenantList(object):
    """Iterates over the results of a list_tenants query. Returns
    KeyValue objects.
    """

    def __init__(self, rangeresult):
        self._range = rangeresult
        self._iter = iter(self._range)

    def to_list(self):
        return list(self.__iter__())

    def __iter__(self):
        for next_item in self._iter:
            tenant_name = _impl.remove_prefix(next_item.key, _tenant_map_prefix)
            yield _impl.KeyValue(tenant_name, next_item.value)


# Lists the tenants created in the cluster, restricted to the range given by the
# begin and end parameters and to at most limit results.
# Returns an iterable object that yields KeyValue objects
# where the keys are the tenant names and the values are the unprocessed
# JSON strings of the tenant metadata
@_impl.transactional
def _list_tenants_impl(tr, begin, end, limit):
    tr.options.set_read_system_keys()
    begin_key = b'%s%s' % (_tenant_map_prefix, begin)
    end_key = b'%s%s' % (_tenant_map_prefix, end)

    rangeresult = tr.get_range(begin_key, end_key, limit)

    return FDBTenantList(rangeresult)


def create_tenant(db_or_tr, tenant_name):
    tenant_name = _impl.process_tenant_name(tenant_name)

@@ -93,3 +126,9 @@ def delete_tenant(db_or_tr, tenant_name):
    # Callers using a transaction are expected to check existence themselves if required
    existence_check_marker = [] if not isinstance(db_or_tr, _impl.TransactionRead) else [None]
    _delete_tenant_impl(db_or_tr, tenant_name, existence_check_marker)


def list_tenants(db_or_tr, begin, end, limit):
    begin = _impl.process_tenant_name(begin)
    end = _impl.process_tenant_name(end)

    return _list_tenants_impl(db_or_tr, begin, end, limit)
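A quick usage sketch of this new Python API; the tenant name, range bounds, and the 10-result cap are illustrative, and the API version number is an assumption:

    import fdb
    fdb.api_version(710)  # assumed API version

    db = fdb.open()
    fdb.tenant_management.create_tenant(db, b'tenant1')

    # List up to 10 tenants whose names fall in [b'a', b'z').
    for kv in fdb.tenant_management.list_tenants(db, b'a', b'z', 10):
        print(kv.key, kv.value)  # tenant name, raw JSON metadata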
@@ -215,6 +215,26 @@ def kill(logger):
    assert new_generation > old_generation


@enable_logging()
def killall(logger):
    # test is designed to make sure 'kill all' sends all requests simultaneously
    old_generation = get_value_from_status_json(False, 'cluster', 'generation')
    # This is currently an issue with fdbcli,
    # where you need to first run 'kill' to initialize the processes' list
    # and then specify the particular process to kill
    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
    output, error = process.communicate(input='kill; kill all; sleep 1\n'.encode())
    logger.debug(output)
    # wait for a second for the cluster recovery
    time.sleep(1)
    new_generation = get_value_from_status_json(True, 'cluster', 'generation')
    logger.debug("Old generation: {}, New generation: {}".format(old_generation, new_generation))
    # Make sure the kill is not happening sequentially
    # Pre: each recovery will increase the generation number by 2
    # Relax the condition to allow one additional recovery that may have happened while we fetched the old generation
    assert new_generation <= (old_generation + 4)


@enable_logging()
def suspend(logger):
    if not shutil.which("pidof"):

@@ -260,43 +280,31 @@ def suspend(logger):
    assert get_value_from_status_json(False, 'client', 'database_status', 'available')


def extract_version_epoch(cli_output):
    return int(cli_output.split("\n")[-1].split(" ")[-1])


@enable_logging()
def targetversion(logger):
    version1 = run_fdbcli_command('targetversion getepoch')
def versionepoch(logger):
    version1 = run_fdbcli_command('versionepoch')
    assert version1 == "Version epoch is unset"
    version2 = int(run_fdbcli_command('getversion'))
    logger.debug("read version: {}".format(version2))
    assert version2 >= 0
    # set the version epoch to the default value
    logger.debug("setting version epoch to default")
    run_fdbcli_command('targetversion add 0')
    # get the version epoch
    versionepoch1 = extract_version_epoch(run_fdbcli_command('targetversion getepoch'))
    logger.debug("version epoch: {}".format(versionepoch1))
    # make sure the version increased
    version3 = int(run_fdbcli_command('getversion'))
    logger.debug("read version: {}".format(version3))
    assert version3 >= version2
    # slightly increase the version epoch
    versionepoch2 = extract_version_epoch(run_fdbcli_command("targetversion setepoch {}".format(versionepoch1 + 1000000)))
    logger.debug("version epoch: {}".format(versionepoch2))
    assert versionepoch2 == versionepoch1 + 1000000
    # slightly decrease the version epoch
    versionepoch3 = extract_version_epoch(run_fdbcli_command("targetversion add {}".format(-1000000)))
    logger.debug("version epoch: {}".format(versionepoch3))
    assert versionepoch3 == versionepoch2 - 1000000 == versionepoch1
    # the versions should still be increasing
    version4 = int(run_fdbcli_command('getversion'))
    logger.debug("read version: {}".format(version4))
    assert version4 >= version3
    # clear the version epoch and make sure it is now unset
    run_fdbcli_command("targetversion clearepoch")
    version5 = run_fdbcli_command('targetversion getepoch')
    assert version5 == "Version epoch is unset"
    version2 = run_fdbcli_command('versionepoch get')
    assert version2 == "Version epoch is unset"
    version3 = run_fdbcli_command('versionepoch commit')
    assert version3 == "Must set the version epoch before committing it (see `versionepoch enable`)"
    version4 = run_fdbcli_command('versionepoch enable')
    assert version4 == "Version epoch enabled. Run `versionepoch commit` to irreversibly jump to the target version"
    version5 = run_fdbcli_command('versionepoch get')
    assert version5 == "Current version epoch is 0"
    version6 = run_fdbcli_command('versionepoch set 10')
    assert version6 == "Version epoch enabled. Run `versionepoch commit` to irreversibly jump to the target version"
    version7 = run_fdbcli_command('versionepoch get')
    assert version7 == "Current version epoch is 10"
    run_fdbcli_command('versionepoch disable')
    version8 = run_fdbcli_command('versionepoch get')
    assert version8 == "Version epoch is unset"
    version9 = run_fdbcli_command('versionepoch enable')
    assert version9 == "Version epoch enabled. Run `versionepoch commit` to irreversibly jump to the target version"
    version10 = run_fdbcli_command('versionepoch get')
    assert version10 == "Current version epoch is 0"
    version11 = run_fdbcli_command('versionepoch commit')
    assert version11.startswith("Current read version is ")


def get_value_from_status_json(retry, *args):
@@ -582,6 +590,7 @@ def triggerddteaminfolog(logger):
    output = run_fdbcli_command('triggerddteaminfolog')
    assert output == 'Triggered team info logging in data distribution.'


@enable_logging()
def tenants(logger):
    output = run_fdbcli_command('listtenants')

@@ -610,7 +619,7 @@ def tenants(logger):
    assert len(lines) == 2
    assert lines[0].strip().startswith('id: ')
    assert lines[1].strip().startswith('prefix: ')

    output = run_fdbcli_command('usetenant')
    assert output == 'Using the default tenant'

@@ -652,7 +661,8 @@ def tenants(logger):
    assert lines[3] == '`tenant_test\' is `default_tenant\''

    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
    cmd_sequence = ['writemode on', 'usetenant tenant', 'clear tenant_test', 'deletetenant tenant', 'get tenant_test', 'defaulttenant', 'usetenant tenant']
    cmd_sequence = ['writemode on', 'usetenant tenant', 'clear tenant_test',
                    'deletetenant tenant', 'get tenant_test', 'defaulttenant', 'usetenant tenant']
    output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())

    lines = output.decode().strip().split('\n')[-7:]

@@ -680,6 +690,7 @@ def tenants(logger):

    run_fdbcli_command('writemode on; clear tenant_test')


if __name__ == '__main__':
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description="""

@@ -724,12 +735,11 @@ if __name__ == '__main__':
        throttle()
        triggerddteaminfolog()
        tenants()
        # TODO: similar to advanceversion, this seems to cause some issues, so disable for now
        # This must go last, otherwise the version advancement can mess with the other tests
        # targetversion()
        versionepoch()
    else:
        assert args.process_number > 1, "Process number should be positive"
        coordinators()
        exclude()
        killall()
        # TODO: fix the failure where one process is not available after setclass call
        #setclass()
        # setclass()
@@ -59,6 +59,18 @@ def test_tenant_operations(db):
    fdb.tenant_management.create_tenant(db, b'tenant1')
    fdb.tenant_management.create_tenant(db, b'tenant2')

    tenant_list = fdb.tenant_management.list_tenants(db, b'a', b'z', 10).to_list()
    assert tenant_list[0].key == b'tenant1'
    assert tenant_list[1].key == b'tenant2'

    t1_entry = tenant_list[0].value
    t1_json = json.loads(t1_entry)
    p1 = t1_json['prefix'].encode('utf8')

    t2_entry = tenant_list[1].value
    t2_json = json.loads(t2_entry)
    p2 = t2_json['prefix'].encode('utf8')

    tenant1 = db.open_tenant(b'tenant1')
    tenant2 = db.open_tenant(b'tenant2')

@@ -69,10 +81,12 @@ def test_tenant_operations(db):
    tenant1_entry = db[b'\xff\xff/management/tenant_map/tenant1']
    tenant1_json = json.loads(tenant1_entry)
    prefix1 = tenant1_json['prefix'].encode('utf8')
    assert prefix1 == p1

    tenant2_entry = db[b'\xff\xff/management/tenant_map/tenant2']
    tenant2_json = json.loads(tenant2_entry)
    prefix2 = tenant2_json['prefix'].encode('utf8')
    assert prefix2 == p2

    assert tenant1[b'tenant_test_key'] == b'tenant1'
    assert db[prefix1 + b'tenant_test_key'] == b'tenant1'
@@ -30,6 +30,7 @@ import time
import random
import time
import traceback
import json

sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import fdb

@@ -604,6 +605,19 @@ class Tester:
            self.tenant = self.db.open_tenant(name)
        elif inst.op == six.u("TENANT_CLEAR_ACTIVE"):
            self.tenant = None
        elif inst.op == six.u("TENANT_LIST"):
            begin, end, limit = inst.pop(3)
            tenant_list = fdb.tenant_management.list_tenants(self.db, begin, end, limit)
            result = []
            for tenant in tenant_list:
                result += [tenant.key]
                try:
                    metadata = json.loads(tenant.value)
                    id = metadata["id"]
                    prefix = metadata["prefix"]
                except (json.decoder.JSONDecodeError, KeyError) as e:
                    assert False, "Invalid Tenant Metadata"
            inst.push(fdb.tuple.pack(tuple(result)))
        elif inst.op == six.u("UNIT_TESTS"):
            try:
                test_db_options(db)
@@ -404,8 +404,7 @@ endfunction()

# Creates a single cluster before running the specified command (usually a ctest test)
function(add_fdbclient_test)
  set(options DISABLED ENABLED DISABLE_LOG_DUMP)
  set(options DISABLED ENABLED API_TEST_BLOB_GRANULES_ENABLED)
  set(options DISABLED ENABLED DISABLE_LOG_DUMP API_TEST_BLOB_GRANULES_ENABLED TLS_ENABLED)
  set(oneValueArgs NAME PROCESS_NUMBER TEST_TIMEOUT WORKING_DIRECTORY)
  set(multiValueArgs COMMAND)
  cmake_parse_arguments(T "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")

@@ -435,6 +434,9 @@ function(add_fdbclient_test)
  if(T_API_TEST_BLOB_GRANULES_ENABLED)
    list(APPEND TMP_CLUSTER_CMD --blob-granules-enabled)
  endif()
  if(T_TLS_ENABLED)
    list(APPEND TMP_CLUSTER_CMD --tls-enabled)
  endif()
  message(STATUS "Adding Client test ${T_NAME}")
  add_test(NAME "${T_NAME}"
    WORKING_DIRECTORY ${T_WORKING_DIRECTORY}
@@ -9,7 +9,7 @@ function(compile_boost)

  # Configure bootstrap command
  set(BOOTSTRAP_COMMAND "./bootstrap.sh")
  set(BOOTSTRAP_LIBRARIES "context")
  set(BOOTSTRAP_LIBRARIES "context,filesystem")

  set(BOOST_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
  if(CLANG)

@@ -57,15 +57,20 @@ function(compile_boost)
    INSTALL_COMMAND ""
    UPDATE_COMMAND ""
    BUILD_BYPRODUCTS "${BOOST_INSTALL_DIR}/boost/config.hpp"
                     "${BOOST_INSTALL_DIR}/lib/libboost_context.a")
                     "${BOOST_INSTALL_DIR}/lib/libboost_context.a"
                     "${BOOST_INSTALL_DIR}/lib/libboost_filesystem.a")

  add_library(${COMPILE_BOOST_TARGET}_context STATIC IMPORTED)
  add_dependencies(${COMPILE_BOOST_TARGET}_context ${COMPILE_BOOST_TARGET}Project)
  set_target_properties(${COMPILE_BOOST_TARGET}_context PROPERTIES IMPORTED_LOCATION "${BOOST_INSTALL_DIR}/lib/libboost_context.a")

  add_library(${COMPILE_BOOST_TARGET}_filesystem STATIC IMPORTED)
  add_dependencies(${COMPILE_BOOST_TARGET}_filesystem ${COMPILE_BOOST_TARGET}Project)
  set_target_properties(${COMPILE_BOOST_TARGET}_filesystem PROPERTIES IMPORTED_LOCATION "${BOOST_INSTALL_DIR}/lib/libboost_filesystem.a")

  add_library(${COMPILE_BOOST_TARGET} INTERFACE)
  target_include_directories(${COMPILE_BOOST_TARGET} SYSTEM INTERFACE ${BOOST_INSTALL_DIR}/include)
  target_link_libraries(${COMPILE_BOOST_TARGET} INTERFACE ${COMPILE_BOOST_TARGET}_context)
  target_link_libraries(${COMPILE_BOOST_TARGET} INTERFACE ${COMPILE_BOOST_TARGET}_context ${COMPILE_BOOST_TARGET}_filesystem)

endfunction(compile_boost)

@@ -91,11 +96,11 @@ set(Boost_USE_STATIC_LIBS ON)
if (UNIX AND CMAKE_CXX_COMPILER_ID MATCHES "Clang$")
  list(APPEND CMAKE_PREFIX_PATH /opt/boost_1_78_0_clang)
  set(BOOST_HINT_PATHS /opt/boost_1_78_0_clang)
  message(STATUS "Using Clang version of boost::context")
  message(STATUS "Using Clang version of boost::context and boost::filesystem")
else ()
  list(APPEND CMAKE_PREFIX_PATH /opt/boost_1_78_0)
  set(BOOST_HINT_PATHS /opt/boost_1_78_0)
  message(STATUS "Using g++ version of boost::context")
  message(STATUS "Using g++ version of boost::context and boost::filesystem")
endif ()

if(BOOST_ROOT)

@@ -107,18 +112,18 @@ if(WIN32)
  # properly for config mode. So we use the old way on Windows
  # find_package(Boost 1.72.0 EXACT QUIET REQUIRED CONFIG PATHS ${BOOST_HINT_PATHS})
  # I think depending on the cmake version this will cause weird warnings
  find_package(Boost 1.72)
  find_package(Boost 1.72 COMPONENTS filesystem)
  add_library(boost_target INTERFACE)
  target_link_libraries(boost_target INTERFACE Boost::boost)
  target_link_libraries(boost_target INTERFACE Boost::boost Boost::filesystem)
  return()
endif()

find_package(Boost 1.78.0 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS})
find_package(Boost 1.78.0 EXACT QUIET COMPONENTS context filesystem CONFIG PATHS ${BOOST_HINT_PATHS})
set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost")

if(Boost_FOUND AND NOT FORCE_BOOST_BUILD)
if(Boost_FOUND AND Boost_filesystem_FOUND AND Boost_context_FOUND AND NOT FORCE_BOOST_BUILD)
  add_library(boost_target INTERFACE)
  target_link_libraries(boost_target INTERFACE Boost::boost Boost::context)
  target_link_libraries(boost_target INTERFACE Boost::boost Boost::context Boost::filesystem)
elseif(WIN32)
  message(FATAL_ERROR "Could not find Boost")
else()
@@ -20,41 +20,36 @@ endif()

include(CheckSymbolExists)

set(DISABLE_TLS OFF CACHE BOOL "Don't try to find OpenSSL and always build without TLS support")
set(USE_WOLFSSL OFF CACHE BOOL "Build against WolfSSL instead of OpenSSL")
set(USE_OPENSSL ON CACHE BOOL "Build against OpenSSL")
if(DISABLE_TLS)
  set(WITH_TLS OFF)
else()
  if(USE_WOLFSSL)
    set(WOLFSSL_USE_STATIC_LIBS TRUE)
    find_package(WolfSSL)
    if(WOLFSSL_FOUND)
      set(CMAKE_REQUIRED_INCLUDES ${WOLFSSL_INCLUDE_DIR})
      set(WITH_TLS ON)
      add_compile_options(-DHAVE_OPENSSL)
      add_compile_options(-DHAVE_WOLFSSL)
    else()
      message(STATUS "WolfSSL was not found - Will compile without TLS Support")
      message(STATUS "You can set WOLFSSL_ROOT_DIR to help cmake find it")
      set(WITH_TLS OFF)
    endif()
  elseif(USE_OPENSSL)
    set(OPENSSL_USE_STATIC_LIBS TRUE)
    if(WIN32)
      set(OPENSSL_MSVC_STATIC_RT ON)
    endif()
    find_package(OpenSSL)
    if(OPENSSL_FOUND)
      set(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
      set(WITH_TLS ON)
      add_compile_options(-DHAVE_OPENSSL)
    else()
      message(STATUS "OpenSSL was not found - Will compile without TLS Support")
      message(STATUS "You can set OPENSSL_ROOT_DIR to help cmake find it")
      set(WITH_TLS OFF)
    endif()
if(USE_WOLFSSL)
  set(WOLFSSL_USE_STATIC_LIBS TRUE)
  find_package(WolfSSL)
  if(WOLFSSL_FOUND)
    set(CMAKE_REQUIRED_INCLUDES ${WOLFSSL_INCLUDE_DIR})
    add_compile_options(-DHAVE_OPENSSL)
    add_compile_options(-DHAVE_WOLFSSL)
  else()
    message(STATUS "WolfSSL was not found - Will compile without TLS Support")
    message(STATUS "You can set WOLFSSL_ROOT_DIR to help cmake find it")
    message(FATAL_ERROR "Unable to find WolfSSL")
  endif()
elseif(USE_OPENSSL)
  set(OPENSSL_USE_STATIC_LIBS TRUE)
  if(WIN32)
    set(OPENSSL_MSVC_STATIC_RT ON)
  endif()
  find_package(OpenSSL)
  if(OPENSSL_FOUND)
    set(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
    add_compile_options(-DHAVE_OPENSSL)
  else()
    message(STATUS "OpenSSL was not found - Will compile without TLS Support")
    message(STATUS "You can set OPENSSL_ROOT_DIR to help cmake find it")
    message(FATAL_ERROR "Unable to find OpenSSL")
  endif()
else()
  message(FATAL_ERROR "Must set USE_WOLFSSL or USE_OPENSSL")
endif()

################################################################################

@@ -222,7 +217,7 @@ set(DEFAULT_COROUTINE_IMPL boost)
if(WIN32)
  # boost coroutine not available in windows build environment for now.
  set(DEFAULT_COROUTINE_IMPL libcoro)
elseif(NOT APPLE AND NOT USE_SANITIZER AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^x86")
elseif(NOT APPLE AND NOT USE_ASAN AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^x86")
  # revert to libcoro for x86 linux while we investigate a performance regression
  set(DEFAULT_COROUTINE_IMPL libcoro)
endif()

@@ -255,7 +250,6 @@ function(print_components)
  message(STATUS "Build Java Bindings: ${WITH_JAVA_BINDING}")
  message(STATUS "Build Go bindings: ${WITH_GO_BINDING}")
  message(STATUS "Build Ruby bindings: ${WITH_RUBY_BINDING}")
  message(STATUS "Build with TLS support: ${WITH_TLS}")
  message(STATUS "Build Documentation (make html): ${WITH_DOCUMENTATION}")
  message(STATUS "Build Python sdist (make package): ${WITH_PYTHON_BINDING}")
  message(STATUS "Configure CTest (depends on Python): ${WITH_PYTHON}")

@@ -265,7 +259,7 @@ function(print_components)
endfunction()

if(FORCE_ALL_COMPONENTS)
  if(NOT WITH_C_BINDING OR NOT WITH_JAVA_BINDING OR NOT WITH_TLS OR NOT WITH_GO_BINDING OR NOT WITH_RUBY_BINDING OR NOT WITH_PYTHON_BINDING OR NOT WITH_DOCUMENTATION)
  if(NOT WITH_C_BINDING OR NOT WITH_JAVA_BINDING OR NOT WITH_GO_BINDING OR NOT WITH_RUBY_BINDING OR NOT WITH_PYTHON_BINDING OR NOT WITH_DOCUMENTATION)
    print_components()
    message(FATAL_ERROR "FORCE_ALL_COMPONENTS is set but not all dependencies could be found")
  endif()
@@ -5,3 +5,4 @@ if(NOT WIN32)
  add_subdirectory(TraceLogHelper)
  add_subdirectory(TestHarness)
endif()
add_subdirectory(mockkms)
@@ -376,11 +376,13 @@ namespace SummarizeTest
    bool useNewPlugin = (oldServerName == fdbserverName) || versionGreaterThanOrEqual(oldServerName.Split('-').Last(), "5.2.0");
    bool useToml = File.Exists(testFile + "-1.toml");
    string testFile1 = useToml ? testFile + "-1.toml" : testFile + "-1.txt";
    result = RunTest(firstServerName, useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1, summaryFileName, errorFileName, seed, buggify, testFile1, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, true, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
    bool useValgrindRunOne = useValgrind && firstServerName == fdbserverName;
    bool useValgrindRunTwo = useValgrind && secondServerName == fdbserverName;
    result = RunTest(firstServerName, useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1, summaryFileName, errorFileName, seed, buggify, testFile1, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrindRunOne, false, true, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
    if (result == 0)
    {
        string testFile2 = useToml ? testFile + "-2.toml" : testFile + "-2.txt";
        result = RunTest(secondServerName, tlsPluginFile, summaryFileName, errorFileName, seed+1, buggify, testFile2, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, true, false, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
        result = RunTest(secondServerName, tlsPluginFile, summaryFileName, errorFileName, seed+1, buggify, testFile2, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrindRunTwo, true, false, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
    }
}
else

@@ -458,7 +460,7 @@ namespace SummarizeTest
    role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", faultInjectionArg, tlsPluginArg);
}
if (restarting) args = args + " --restarting";
if (useValgrind && !willRestart)
if (useValgrind)
{
    valgrindOutputFile = string.Format("valgrind-{0}.xml", seed);
    process.StartInfo.FileName = "valgrind";
@@ -0,0 +1,18 @@
if(WITH_GO_BINDING)
  set(MOCK_KMS_SRC fault_injection.go get_encryption_keys.go mock_kms.go utils.go)
  set(MOCK_KMS_TEST_SRC ${MOCK_KMS_SRC} mockkms_test.go)
  add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/bin/mockkms
    COMMAND go build -o ${CMAKE_BINARY_DIR}/bin/mockkms ${MOCK_KMS_SRC}
    DEPENDS ${MOCK_KMS_SRC}
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
  add_custom_target(mockkms ALL DEPENDS ${CMAKE_BINARY_DIR}/bin/mockkms)
  fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/mockkms DESTINATION bin COMPONENT server)

  add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/bin/mockkms_test
    COMMAND go test -c -o ${CMAKE_BINARY_DIR}/bin/mockkms_test ${MOCK_KMS_TEST_SRC}
    DEPENDS ${MOCK_KMS_TEST_SRC}
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
  add_custom_target(mockkms_test ALL DEPENDS ${CMAKE_BINARY_DIR}/bin/mockkms_test)
  add_test(NAME mockkms COMMAND ${CMAKE_BINARY_DIR}/bin/mockkms_test)
endif()
@@ -0,0 +1,179 @@
/*
 * fault_injection.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Interface that lets a client inject fault(s).
// The module lets a client update the { FaultLocation -> FaultStatus } mapping in a
// thread-safe manner; however, the client is responsible for synchronizing fault status
// updates across 'getEncryptionKeys' REST requests to obtain predictable results.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"sync"
)

type Fault struct {
	Location int  `json:"fault_location"`
	Enable   bool `json:"enable_fault"`
}

type FaultInjectionRequest struct {
	Faults []Fault `json:"faults"`
}

type FaultInjectionResponse struct {
	Faults []Fault `json:"faults"`
}

type faultLocMap struct {
	locMap map[int]bool
	rwLock sync.RWMutex
}

var (
	faultLocMapInstance *faultLocMap // Singleton mapping of { FaultLocation -> FaultStatus }
)

// Caller is responsible for thread synchronization. Recommended to be invoked during package::init()
func NewFaultLocMap() *faultLocMap {
	if faultLocMapInstance == nil {
		faultLocMapInstance = &faultLocMap{}

		faultLocMapInstance.rwLock = sync.RWMutex{}
		faultLocMapInstance.locMap = map[int]bool{
			READ_HTTP_REQUEST_BODY:      false,
			UNMARSHAL_REQUEST_BODY_JSON: false,
			UNSUPPORTED_QUERY_MODE:      false,
			PARSE_HTTP_REQUEST:          false,
			MARSHAL_RESPONSE:            false,
		}
	}
	return faultLocMapInstance
}

func getLocFaultStatus(loc int) (val bool, found bool) {
	if faultLocMapInstance == nil {
		panic("FaultLocMap not initialized")
	}

	faultLocMapInstance.rwLock.RLock()
	defer faultLocMapInstance.rwLock.RUnlock()
	val, found = faultLocMapInstance.locMap[loc]
	return
}

func updateLocFaultStatuses(faults []Fault) (updated []Fault, err error) {
	if faultLocMapInstance == nil {
		panic("FaultLocMap not initialized")
	}

	updated = []Fault{}
	err = nil

	faultLocMapInstance.rwLock.Lock()
	defer faultLocMapInstance.rwLock.Unlock()
	for i := 0; i < len(faults); i++ {
		fault := faults[i]

		oldVal, found := faultLocMapInstance.locMap[fault.Location]
		if !found {
			err = fmt.Errorf("Unknown fault_location '%d'", fault.Location)
			return
		}
		faultLocMapInstance.locMap[fault.Location] = fault.Enable
		log.Printf("Update Location '%d' oldVal '%t' newVal '%t'", fault.Location, oldVal, fault.Enable)
	}

	// return the updated faultLocMap
	for loc, enable := range faultLocMapInstance.locMap {
		var f Fault
		f.Location = loc
		f.Enable = enable
		updated = append(updated, f)
	}
	return
}

func jsonifyFaultArr(w http.ResponseWriter, faults []Fault) (jResp string) {
	resp := FaultInjectionResponse{
		Faults: faults,
	}

	mResp, err := json.Marshal(resp)
	if err != nil {
		log.Printf("Error marshaling response '%s'", err.Error())
		sendErrorResponse(w, err)
		return
	}
	jResp = string(mResp)
	return
}

func updateFaultLocMap(w http.ResponseWriter, faults []Fault) {
	updated, err := updateLocFaultStatuses(faults)
	if err != nil {
		sendErrorResponse(w, err)
		return
	}

	fmt.Fprint(w, jsonifyFaultArr(w, updated))
}

func shouldInjectFault(loc int) bool {
	status, found := getLocFaultStatus(loc)
	if !found {
		log.Printf("Unknown fault_location '%d'", loc)
		return false
	}
	return status
}

func handleUpdateFaultInjection(w http.ResponseWriter, r *http.Request) {
	byteArr, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Printf("Http request body read error '%s'", err.Error())
		sendErrorResponse(w, err)
		return
	}

	req := FaultInjectionRequest{}
	err = json.Unmarshal(byteArr, &req)
	if err != nil {
		log.Printf("Error parsing FaultInjectionRequest '%s'", string(byteArr))
		sendErrorResponse(w, err)
		return
	}
	updateFaultLocMap(w, req.Faults)
}

func initFaultLocMap() {
	faultLocMapInstance = NewFaultLocMap()
	log.Printf("FaultLocMap init done")
}
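A hedged sketch of driving the fault-injection endpoint from a test script. It assumes the server from mock_kms.go (further below) is running locally on its default :5001; fault location 0 corresponds to READ_HTTP_REQUEST_BODY per the iota block in get_encryption_keys.go:

    import json
    import urllib.request

    # Enable fault injection for location 0 (READ_HTTP_REQUEST_BODY).
    payload = json.dumps({"faults": [{"fault_location": 0, "enable_fault": True}]}).encode()
    req = urllib.request.Request("http://localhost:5001/updateFaultInjection",
                                 data=payload, method="POST")
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp))  # echoes back the full fault map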
@@ -0,0 +1,321 @@
/*
 * get_encryption_keys.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// GetEncryptionKeys handler
// The handler is responsible for the following:
// 1. Parse the incoming HttpRequest and validate JSON request structural sanity
// 2. Ability to handle getEncryptionKeys by 'KeyId' or 'DomainId' as requested
// 3. Ability to inject faults if requested

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
)

type CipherDetailRes struct {
	BaseCipherId    uint64 `json:"base_cipher_id"`
	EncryptDomainId int64  `json:"encrypt_domain_id"`
	BaseCipher      string `json:"base_cipher"`
}

type ValidationToken struct {
	TokenName  string `json:"token_name"`
	TokenValue string `json:"token_value"`
}

type CipherDetailReq struct {
	BaseCipherId    uint64 `json:"base_cipher_id"`
	EncryptDomainId int64  `json:"encrypt_domain_id"`
}

type GetEncryptKeysResponse struct {
	CipherDetails []CipherDetailRes `json:"cipher_key_details"`
	KmsUrls       []string          `json:"kms_urls"`
}

type GetEncryptKeysRequest struct {
	QueryMode        string            `json:"query_mode"`
	CipherDetails    []CipherDetailReq `json:"cipher_key_details"`
	ValidationTokens []ValidationToken `json:"validation_tokens"`
	RefreshKmsUrls   bool              `json:"refresh_kms_urls"`
}

type cipherMapInstanceSingleton map[uint64][]byte

const (
	READ_HTTP_REQUEST_BODY = iota
	UNMARSHAL_REQUEST_BODY_JSON
	UNSUPPORTED_QUERY_MODE
	PARSE_HTTP_REQUEST
	MARSHAL_RESPONSE
)

const (
	maxCipherKeys = uint64(1024 * 1024) // Max cipher keys
	maxCipherSize = 16                  // Max cipher buffer size
)

var (
	cipherMapInstance cipherMapInstanceSingleton // Singleton mapping of { baseCipherId -> baseCipher }
)

// const mapping of { Location -> errorString }
func errStrMap() func(int) string {
	_errStrMap := map[int]string{
		READ_HTTP_REQUEST_BODY:      "Http request body read error",
		UNMARSHAL_REQUEST_BODY_JSON: "Http request body unmarshal error",
		UNSUPPORTED_QUERY_MODE:      "Unsupported query_mode",
		PARSE_HTTP_REQUEST:          "Error parsing GetEncryptionKeys request",
		MARSHAL_RESPONSE:            "Error marshaling response",
	}

	return func(key int) string {
		return _errStrMap[key]
	}
}

// Caller is responsible for thread synchronization. Recommended to be invoked during package::init()
func NewCipherMap(maxKeys uint64, cipherSize int) cipherMapInstanceSingleton {
	if cipherMapInstance == nil {
		cipherMapInstance = make(map[uint64][]byte)

		for i := uint64(1); i <= maxKeys; i++ {
			cipher := make([]byte, cipherSize)
			rand.Read(cipher)
			cipherMapInstance[i] = cipher
		}
		log.Printf("KMS cipher map populate done, maxCiphers '%d'", maxCipherKeys)
	}
	return cipherMapInstance
}

func getKmsUrls() (urls []string) {
	urlCount := rand.Intn(5) + 1
	for i := 1; i <= urlCount; i++ {
		url := fmt.Sprintf("https://KMS/%d:%d:%d:%d", i, i, i, i)
		urls = append(urls, url)
	}
	return
}

func isEncryptDomainIdValid(id int64) bool {
	if id > 0 || id == -1 || id == -2 {
		return true
	}
	return false
}

func abs(x int64) int64 {
	if x < 0 {
		return -x
	}
	return x
}

func getBaseCipherIdFromDomainId(domainId int64) (baseCipherId uint64) {
	baseCipherId = uint64(1) + uint64(abs(domainId))%maxCipherKeys
	return
}

func getEncryptionKeysByKeyIds(w http.ResponseWriter, byteArr []byte) {
	req := GetEncryptKeysRequest{}
	err := json.Unmarshal(byteArr, &req)
	if err != nil || shouldInjectFault(PARSE_HTTP_REQUEST) {
		var e error
		if shouldInjectFault(PARSE_HTTP_REQUEST) {
			e = fmt.Errorf("[FAULT] %s '%s'", errStrMap()(PARSE_HTTP_REQUEST), string(byteArr))
		} else {
			e = fmt.Errorf("%s '%s' err '%v'", errStrMap()(PARSE_HTTP_REQUEST), string(byteArr), err)
		}
		log.Println(e.Error())
		sendErrorResponse(w, e)
		return
	}

	var details []CipherDetailRes
	for i := 0; i < len(req.CipherDetails); i++ {
		var baseCipherId = uint64(req.CipherDetails[i].BaseCipherId)

		var encryptDomainId = int64(req.CipherDetails[i].EncryptDomainId)
		if !isEncryptDomainIdValid(encryptDomainId) {
			e := fmt.Errorf("EncryptDomainId not valid '%d'", encryptDomainId)
			sendErrorResponse(w, e)
			return
		}

		cipher, found := cipherMapInstance[baseCipherId]
		if !found {
			e := fmt.Errorf("BaseCipherId not found '%d'", baseCipherId)
			sendErrorResponse(w, e)
			return
		}

		var detail = CipherDetailRes{
			BaseCipherId:    baseCipherId,
			EncryptDomainId: encryptDomainId,
			BaseCipher:      string(cipher),
		}
		details = append(details, detail)
	}

	var urls []string
	if req.RefreshKmsUrls {
		urls = getKmsUrls()
	}

	resp := GetEncryptKeysResponse{
		CipherDetails: details,
		KmsUrls:       urls,
	}

	mResp, err := json.Marshal(resp)
	if err != nil || shouldInjectFault(MARSHAL_RESPONSE) {
		var e error
		if shouldInjectFault(MARSHAL_RESPONSE) {
			e = fmt.Errorf("[FAULT] %s", errStrMap()(MARSHAL_RESPONSE))
		} else {
			e = fmt.Errorf("%s err '%v'", errStrMap()(MARSHAL_RESPONSE), err)
		}
		log.Println(e.Error())
		sendErrorResponse(w, e)
		return
	}

	fmt.Fprint(w, string(mResp))
}

func getEncryptionKeysByDomainIds(w http.ResponseWriter, byteArr []byte) {
	req := GetEncryptKeysRequest{}
	err := json.Unmarshal(byteArr, &req)
	if err != nil || shouldInjectFault(PARSE_HTTP_REQUEST) {
		var e error
		if shouldInjectFault(PARSE_HTTP_REQUEST) {
			e = fmt.Errorf("[FAULT] %s '%s'", errStrMap()(PARSE_HTTP_REQUEST), string(byteArr))
		} else {
			e = fmt.Errorf("%s '%s' err '%v'", errStrMap()(PARSE_HTTP_REQUEST), string(byteArr), err)
		}
		log.Println(e.Error())
		sendErrorResponse(w, e)
		return
	}

	var details []CipherDetailRes
	for i := 0; i < len(req.CipherDetails); i++ {
		var encryptDomainId = int64(req.CipherDetails[i].EncryptDomainId)
		if !isEncryptDomainIdValid(encryptDomainId) {
			e := fmt.Errorf("EncryptDomainId not valid '%d'", encryptDomainId)
			sendErrorResponse(w, e)
			return
		}

		var baseCipherId = getBaseCipherIdFromDomainId(encryptDomainId)
		cipher, found := cipherMapInstance[baseCipherId]
		if !found {
			e := fmt.Errorf("BaseCipherId not found '%d'", baseCipherId)
			sendErrorResponse(w, e)
			return
		}

		var detail = CipherDetailRes{
			BaseCipherId:    baseCipherId,
			EncryptDomainId: encryptDomainId,
			BaseCipher:      string(cipher),
		}
		details = append(details, detail)
	}

	var urls []string
	if req.RefreshKmsUrls {
		urls = getKmsUrls()
	}

	resp := GetEncryptKeysResponse{
		CipherDetails: details,
		KmsUrls:       urls,
	}

	mResp, err := json.Marshal(resp)
	if err != nil || shouldInjectFault(MARSHAL_RESPONSE) {
		var e error
		if shouldInjectFault(MARSHAL_RESPONSE) {
			e = fmt.Errorf("[FAULT] %s", errStrMap()(MARSHAL_RESPONSE))
		} else {
			e = fmt.Errorf("%s err '%v'", errStrMap()(MARSHAL_RESPONSE), err)
		}
		log.Println(e.Error())
		sendErrorResponse(w, e)
		return
	}

	fmt.Fprint(w, string(mResp))
}

func handleGetEncryptionKeys(w http.ResponseWriter, r *http.Request) {
	byteArr, err := ioutil.ReadAll(r.Body)
	if err != nil || shouldInjectFault(READ_HTTP_REQUEST_BODY) {
		var e error
		if shouldInjectFault(READ_HTTP_REQUEST_BODY) {
			e = fmt.Errorf("[FAULT] %s", errStrMap()(READ_HTTP_REQUEST_BODY))
		} else {
			e = fmt.Errorf("%s err '%v'", errStrMap()(READ_HTTP_REQUEST_BODY), err)
		}
		log.Println(e.Error())
		sendErrorResponse(w, e)
		return
	}

	var arbitrary_json map[string]interface{}
	err = json.Unmarshal(byteArr, &arbitrary_json)
	if err != nil || shouldInjectFault(UNMARSHAL_REQUEST_BODY_JSON) {
		var e error
		if shouldInjectFault(UNMARSHAL_REQUEST_BODY_JSON) {
			e = fmt.Errorf("[FAULT] %s", errStrMap()(UNMARSHAL_REQUEST_BODY_JSON))
		} else {
			e = fmt.Errorf("%s err '%v'", errStrMap()(UNMARSHAL_REQUEST_BODY_JSON), err)
		}
		log.Println(e.Error())
		sendErrorResponse(w, e)
		return
	}

	if shouldInjectFault(UNSUPPORTED_QUERY_MODE) {
		err = fmt.Errorf("[FAULT] %s '%s'", errStrMap()(UNSUPPORTED_QUERY_MODE), arbitrary_json["query_mode"])
		sendErrorResponse(w, err)
		return
	} else if arbitrary_json["query_mode"] == "lookupByKeyId" {
		getEncryptionKeysByKeyIds(w, byteArr)
	} else if arbitrary_json["query_mode"] == "lookupByDomainId" {
		getEncryptionKeysByDomainIds(w, byteArr)
	} else {
		err = fmt.Errorf("%s '%s'", errStrMap()(UNSUPPORTED_QUERY_MODE), arbitrary_json["query_mode"])
		sendErrorResponse(w, err)
		return
	}
}

func initEncryptCipherMap() {
	cipherMapInstance = NewCipherMap(maxCipherKeys, maxCipherSize)
}
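The domain-to-cipher mapping above is pure arithmetic, so it is easy to sanity-check outside Go. A one-file Python mirror of it, using the same domain ids the unit tests below exercise:

    MAX_CIPHER_KEYS = 1024 * 1024

    def base_cipher_id(domain_id: int) -> int:
        # Mirrors getBaseCipherIdFromDomainId: 1 + |domainId| % maxCipherKeys.
        return 1 + abs(domain_id) % MAX_CIPHER_KEYS

    assert base_cipher_id(76) == 77
    assert base_cipher_id(-1) == 2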
@@ -0,0 +1,66 @@
/*
 * mock_kms.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Mock KMS (Key Management Solution/Service) interface
// Interface runs an HTTP server handling REST calls simulating FDB communications
// with an external KMS.

package main

import (
	"log"
	"math/rand"
	"net/http"
	"sync"
	"time"
)

// KMS supported endpoints
const (
	getEncryptionKeysEndpoint    = "/getEncryptionKeys"
	updateFaultInjectionEndpoint = "/updateFaultInjection"
)

// Routine responsible for instantiating the data structures necessary for MockKMS to function
func init() {
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		initEncryptCipherMap()
		wg.Done()
	}()
	go func() {
		initFaultLocMap()
		wg.Done()
	}()

	wg.Wait()

	rand.Seed(time.Now().UTC().UnixNano())
}

func main() {
	http.NewServeMux()
	http.HandleFunc(getEncryptionKeysEndpoint, handleGetEncryptionKeys)
	http.HandleFunc(updateFaultInjectionEndpoint, handleUpdateFaultInjection)

	log.Fatal(http.ListenAndServe(":5001", nil))
}
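For manual testing, the getEncryptionKeys endpoint can be exercised the same way the unit tests below do, just over real HTTP. A sketch assuming the server above is running locally on its default :5001; the request body mirrors the ByDomainId test constants:

    import json
    import urllib.request

    request_body = json.dumps({
        "query_mode": "lookupByDomainId",
        "cipher_key_details": [{"encrypt_domain_id": 76}],
        "validation_tokens": [{"token_name": "1", "token_value": "12344"}],
        "refresh_kms_urls": False,
    }).encode()

    req = urllib.request.Request("http://localhost:5001/getEncryptionKeys",
                                 data=request_body, method="POST")
    with urllib.request.urlopen(req) as resp:
        reply = json.load(resp)
    print(reply["cipher_key_details"])  # expect base_cipher_id 77 for domain 76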
@@ -0,0 +1,302 @@
/*
 * mockkms_test.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// MockKMS unit tests, the coverage includes:
// 1. Mock HttpRequest creation and HttpResponse writer.
// 2. Construct fake requests to validate the following scenarios:
// 2.1. Request with "unsupported query mode"
// 2.2. Get encryption keys by KeyIds; with and without 'RefreshKmsUrls' flag.
// 2.3. Get encryption keys by DomainIds; with and without 'RefreshKmsUrls' flag.
// 2.4. Random fault injection and response validation

package main

import (
	"encoding/json"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

const (
	ByKeyIdReqWithRefreshUrls = `{
		"query_mode": "lookupByKeyId",
		"cipher_key_details": [
			{
				"base_cipher_id": 77,
				"encrypt_domain_id": 76
			},
			{
				"base_cipher_id": 2,
				"encrypt_domain_id": -1
			}
		],
		"validation_tokens": [
			{
				"token_name": "1",
				"token_value":"12344"
			},
			{
				"token_name": "2",
				"token_value":"12334"
			}
		],
		"refresh_kms_urls": true
	}`
	ByKeyIdReqWithoutRefreshUrls = `{
		"query_mode": "lookupByKeyId",
		"cipher_key_details": [
			{
				"base_cipher_id": 77,
				"encrypt_domain_id": 76
			},
			{
				"base_cipher_id": 2,
				"encrypt_domain_id": -1
			}
		],
		"validation_tokens": [
			{
				"token_name": "1",
				"token_value":"12344"
			},
			{
				"token_name": "2",
				"token_value":"12334"
			}
		],
		"refresh_kms_urls": false
	}`
	ByDomainIdReqWithRefreshUrls = `{
		"query_mode": "lookupByDomainId",
		"cipher_key_details": [
			{
				"encrypt_domain_id": 76
			},
			{
				"encrypt_domain_id": -1
			}
		],
		"validation_tokens": [
			{
				"token_name": "1",
				"token_value":"12344"
			},
			{
				"token_name": "2",
				"token_value":"12334"
			}
		],
		"refresh_kms_urls": true
	}`
	ByDomainIdReqWithoutRefreshUrls = `{
		"query_mode": "lookupByDomainId",
		"cipher_key_details": [
			{
				"encrypt_domain_id": 76
			},
			{
				"encrypt_domain_id": -1
			}
		],
		"validation_tokens": [
			{
				"token_name": "1",
				"token_value":"12344"
			},
			{
				"token_name": "2",
				"token_value":"12334"
			}
		],
		"refresh_kms_urls": false
	}`
	UnsupportedQueryMode = `{
		"query_mode": "foo_mode",
		"cipher_key_details": [
			{
				"encrypt_domain_id": 76
			},
			{
				"encrypt_domain_id": -1
			}
		],
		"validation_tokens": [
			{
				"token_name": "1",
				"token_value":"12344"
			},
			{
				"token_name": "2",
				"token_value":"12334"
			}
		],
		"refresh_kms_urls": false
	}`
)

func unmarshalValidResponse(data []byte, t *testing.T) (resp GetEncryptKeysResponse) {
	resp = GetEncryptKeysResponse{}
	err := json.Unmarshal(data, &resp)
	if err != nil {
		t.Errorf("Error unmarshaling valid response '%s' error '%v'", string(data), err)
		t.Fail()
	}
	return
}

func unmarshalErrorResponse(data []byte, t *testing.T) (resp ErrorResponse) {
	resp = ErrorResponse{}
	err := json.Unmarshal(data, &resp)
	if err != nil {
		t.Errorf("Error unmarshaling error response resp '%s' error '%v'", string(data), err)
		t.Fail()
	}
	return
}

func checkGetEncyptKeysResponseValidity(resp GetEncryptKeysResponse, t *testing.T) {
	if len(resp.CipherDetails) != 2 {
		t.Errorf("Unexpected CipherDetails count, expected '%d' actual '%d'", 2, len(resp.CipherDetails))
		t.Fail()
	}

	baseCipherIds := [...]uint64{uint64(77), uint64(2)}
	encryptDomainIds := [...]int64{int64(76), int64(-1)}

	for i := 0; i < len(resp.CipherDetails); i++ {
		if resp.CipherDetails[i].BaseCipherId != baseCipherIds[i] {
			t.Errorf("Mismatch BaseCipherId, expected '%d' actual '%d'", baseCipherIds[i], resp.CipherDetails[i].BaseCipherId)
			t.Fail()
		}
		if resp.CipherDetails[i].EncryptDomainId != encryptDomainIds[i] {
			t.Errorf("Mismatch EncryptDomainId, expected '%d' actual '%d'", encryptDomainIds[i], resp.CipherDetails[i].EncryptDomainId)
			t.Fail()
||||
}
|
||||
if len(resp.CipherDetails[i].BaseCipher) == 0 {
|
||||
t.Error("Empty BaseCipher")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runQueryExpectingErrorResponse(payload string, url string, errSubStr string, t *testing.T) {
|
||||
body := strings.NewReader(payload)
|
||||
req := httptest.NewRequest(http.MethodPost, url, body)
|
||||
w := httptest.NewRecorder()
|
||||
handleGetEncryptionKeys(w, req)
|
||||
res := w.Result()
|
||||
defer res.Body.Close()
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("Error %v", err)
|
||||
}
|
||||
|
||||
resp := unmarshalErrorResponse(data, t)
|
||||
if !strings.Contains(resp.Err.Detail, errSubStr) {
|
||||
t.Errorf("Unexpected error response '%s'", resp.Err.Detail)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func runQueryExpectingValidResponse(payload string, url string, t *testing.T) {
|
||||
body := strings.NewReader(payload)
|
||||
req := httptest.NewRequest(http.MethodPost, url, body)
|
||||
w := httptest.NewRecorder()
|
||||
handleGetEncryptionKeys(w, req)
|
||||
res := w.Result()
|
||||
defer res.Body.Close()
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("Error %v", err)
|
||||
}
|
||||
|
||||
resp := unmarshalValidResponse(data, t)
|
||||
checkGetEncyptKeysResponseValidity(resp, t)
|
||||
}
|
||||
|
||||
func TestUnsupportedQueryMode(t *testing.T) {
|
||||
runQueryExpectingErrorResponse(UnsupportedQueryMode, getEncryptionKeysEndpoint, errStrMap()(UNSUPPORTED_QUERY_MODE), t)
|
||||
}
|
||||
|
||||
func TestGetEncryptionKeysByKeyIdsWithRefreshUrls(t *testing.T) {
|
||||
runQueryExpectingValidResponse(ByKeyIdReqWithRefreshUrls, getEncryptionKeysEndpoint, t)
|
||||
}
|
||||
|
||||
func TestGetEncryptionKeysByKeyIdsWithoutRefreshUrls(t *testing.T) {
|
||||
runQueryExpectingValidResponse(ByKeyIdReqWithoutRefreshUrls, getEncryptionKeysEndpoint, t)
|
||||
}
|
||||
|
||||
func TestGetEncryptionKeysByDomainIdsWithRefreshUrls(t *testing.T) {
|
||||
runQueryExpectingValidResponse(ByDomainIdReqWithRefreshUrls, getEncryptionKeysEndpoint, t)
|
||||
}
|
||||
|
||||
func TestGetEncryptionKeysByDomainIdsWithoutRefreshUrls(t *testing.T) {
|
||||
runQueryExpectingValidResponse(ByDomainIdReqWithoutRefreshUrls, getEncryptionKeysEndpoint, t)
|
||||
}
|
||||
|
||||
func TestFaultInjection(t *testing.T) {
|
||||
numIterations := rand.Intn(701) + 86
|
||||
|
||||
for i := 0; i < numIterations; i++ {
|
||||
loc := rand.Intn(MARSHAL_RESPONSE + 1)
|
||||
f := Fault{}
|
||||
f.Location = loc
|
||||
f.Enable = true
|
||||
|
||||
var faults []Fault
|
||||
faults = append(faults, f)
|
||||
fW := httptest.NewRecorder()
|
||||
body := strings.NewReader(jsonifyFaultArr(fW, faults))
|
||||
fReq := httptest.NewRequest(http.MethodPost, updateFaultInjectionEndpoint, body)
|
||||
handleUpdateFaultInjection(fW, fReq)
|
||||
if !shouldInjectFault(loc) {
|
||||
t.Errorf("Expected fault enabled for loc '%d'", loc)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
var payload string
|
||||
lottery := rand.Intn(100)
|
||||
if lottery < 25 {
|
||||
payload = ByKeyIdReqWithRefreshUrls
|
||||
} else if lottery >= 25 && lottery < 50 {
|
||||
payload = ByKeyIdReqWithoutRefreshUrls
|
||||
} else if lottery >= 50 && lottery < 75 {
|
||||
payload = ByDomainIdReqWithRefreshUrls
|
||||
} else {
|
||||
payload = ByDomainIdReqWithoutRefreshUrls
|
||||
}
|
||||
runQueryExpectingErrorResponse(payload, getEncryptionKeysEndpoint, errStrMap()(loc), t)
|
||||
|
||||
// reset Fault
|
||||
faults[0].Enable = false
|
||||
fW = httptest.NewRecorder()
|
||||
body = strings.NewReader(jsonifyFaultArr(fW, faults))
|
||||
fReq = httptest.NewRequest(http.MethodPost, updateFaultInjectionEndpoint, body)
|
||||
handleUpdateFaultInjection(fW, fReq)
|
||||
if shouldInjectFault(loc) {
|
||||
t.Errorf("Expected fault disabled for loc '%d'", loc)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}

@ -0,0 +1,51 @@

/*
 * utils.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type ErrorDetail struct {
	Detail string `json:"details"`
}

type ErrorResponse struct {
	Err ErrorDetail `json:"error"`
}

func sendErrorResponse(w http.ResponseWriter, err error) {
	e := ErrorDetail{}
	e.Detail = fmt.Sprintf("Error: %s", err.Error())
	resp := ErrorResponse{
		Err: e,
	}

	mResp, err := json.Marshal(resp)
	if err != nil {
		log.Printf("Error marshalling error response %s", err.Error())
		panic(err)
	}
	// Use Fprint rather than Fprintf: the marshaled JSON must not be
	// interpreted as a format string.
	fmt.Fprint(w, string(mResp))
}

@ -194,7 +194,8 @@ class BaseInfo(object):
        if protocol_version >= PROTOCOL_VERSION_6_3:
            self.dc_id = bb.get_bytes_with_length()
        if protocol_version >= PROTOCOL_VERSION_7_1:
            self.tenant = bb.get_bytes_with_length()
            if bb.get_bytes(1):
                self.tenant = bb.get_bytes_with_length()


class GetVersionInfo(BaseInfo):
    def __init__(self, bb, protocol_version):
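
The parser change above makes the tenant field optional on the wire: a one-byte presence flag now precedes the length-prefixed tenant name. Below is a minimal sketch of that decode in Go, assuming a little-endian uint32 length prefix; the prefix width is an assumption, and the authoritative layout is whatever `get_bytes_with_length` actually reads.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readBytesWithLength mirrors the parser's get_bytes_with_length helper,
// assuming a little-endian uint32 length prefix followed by the payload.
func readBytesWithLength(r *bytes.Reader) ([]byte, error) {
	var n uint32
	if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

// readOptionalTenant follows the 7.1 layout shown above: a one-byte presence
// flag, then a length-prefixed tenant name only when the flag is non-zero.
func readOptionalTenant(r *bytes.Reader) ([]byte, error) {
	present, err := r.ReadByte()
	if err != nil {
		return nil, err
	}
	if present == 0 {
		return nil, nil // no tenant recorded for this transaction
	}
	return readBytesWithLength(r)
}

func main() {
	// A record with the flag set and the tenant name "t1".
	raw := []byte{1, 2, 0, 0, 0, 't', '1'}
	tenant, err := readOptionalTenant(bytes.NewReader(raw))
	fmt.Println(string(tenant), err) // prints: t1 <nil>
}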

@ -877,6 +877,9 @@
                "logical_core_utilization":0.4 // computed as cpu_seconds / elapsed_seconds; value may be capped at 0.5 due to hyper-threading
            }
        }
    },
    "tenants":{
        "num_tenants":0
    }
},
"client":{

@ -4,6 +4,62 @@
Release Notes
#############

7.1.7
=====
* Same as the 7.1.6 release with AVX enabled.

7.1.6
=====
* Released with AVX disabled.
* Fixed an fdbserver crash when given an invalid knob name. `(PR #7189) <https://github.com/apple/foundationdb/pull/7189>`_
* Fixed a storage server bug that read data after its failure. `(PR #7217) <https://github.com/apple/foundationdb/pull/7217>`_

7.1.5
=====
* Fixed an fdbcli kill bug that was not killing in parallel. `(PR #7150) <https://github.com/apple/foundationdb/pull/7150>`_
* Fixed a bug that prevented a peer from sending messages on a previously incompatible connection. `(PR #7124) <https://github.com/apple/foundationdb/pull/7124>`_
* Added RocksDB throttling counters to trace events. `(PR #7096) <https://github.com/apple/foundationdb/pull/7096>`_
* Added a backtrace before throwing serialization_failed. `(PR #7155) <https://github.com/apple/foundationdb/pull/7155>`_

7.1.4
=====
* Fixed a bug that prevented clients from connecting to a cluster. `(PR #7060) <https://github.com/apple/foundationdb/pull/7060>`_
* Fixed a performance bug that overloaded the Resolver CPU. `(PR #7068) <https://github.com/apple/foundationdb/pull/7068>`_
* Optimized storage server performance for the "get range and flat map" feature. `(PR #7078) <https://github.com/apple/foundationdb/pull/7078>`_
* Optimized both Proxy performance and Resolver performance (when the version vector is enabled). `(PR #7076) <https://github.com/apple/foundationdb/pull/7076>`_
* Fixed a key size limit bug when using tenants. `(PR #6986) <https://github.com/apple/foundationdb/pull/6986>`_
* Fixed operation_failed being thrown incorrectly from transactions. `(PR #6993) <https://github.com/apple/foundationdb/pull/6993>`_
* Fixed a version vector bug when the GRV cache is used. `(PR #7057) <https://github.com/apple/foundationdb/pull/7057>`_
* Fixed an orphaned storage server caused by force recovery. `(PR #7028) <https://github.com/apple/foundationdb/pull/7028>`_
* Fixed a bug where a storage server could read a stale cluster ID. `(PR #7026) <https://github.com/apple/foundationdb/pull/7026>`_
* Fixed a storage server exclusion status bug that affected wiggling. `(PR #6984) <https://github.com/apple/foundationdb/pull/6984>`_
* Fixed a bug where relocate-shard tasks could move data to a removed team. `(PR #7023) <https://github.com/apple/foundationdb/pull/7023>`_
* Fixed recruitment thrashing when there are temporarily multiple cluster controllers. `(PR #7001) <https://github.com/apple/foundationdb/pull/7001>`_
* Fixed change feed deletion caused by a race between multiple sources. `(PR #6987) <https://github.com/apple/foundationdb/pull/6987>`_
* Fixed a TLog crash when more TLogs are absent than the replication factor. `(PR #6991) <https://github.com/apple/foundationdb/pull/6991>`_
* Added hostname DNS resolution logic for the cluster connection string. `(PR #6998) <https://github.com/apple/foundationdb/pull/6998>`_
* Fixed a limit bug in indexPrefetch. `(PR #7005) <https://github.com/apple/foundationdb/pull/7005>`_

7.1.3
=====
* Added logging that measures commit compute duration. `(PR #6906) <https://github.com/apple/foundationdb/pull/6906>`_
* RocksDB now uses aggregated property metrics for pending compaction bytes. `(PR #6867) <https://github.com/apple/foundationdb/pull/6867>`_
* Fixed a perpetual wiggle bug that would not react to a pause. `(PR #6933) <https://github.com/apple/foundationdb/pull/6933>`_
* Fixed a crash of the data distributor. `(PR #6938) <https://github.com/apple/foundationdb/pull/6938>`_
* Added new C libraries to the client package. `(PR #6921) <https://github.com/apple/foundationdb/pull/6921>`_
* Fixed a bug that prevented a cluster from reaching a fully recovered state after taking a snapshot. `(PR #6892) <https://github.com/apple/foundationdb/pull/6892>`_

7.1.2
=====
* Fixed failing upgrades due to a non-persisted initial cluster version. `(PR #6864) <https://github.com/apple/foundationdb/pull/6864>`_
* Fixed a client load balancing bug where ClientDBInfo could be unintentionally left unset. `(PR #6878) <https://github.com/apple/foundationdb/pull/6878>`_
* Fixed a stuck LogRouter caused by races between multiple PeekStream requests. `(PR #6870) <https://github.com/apple/foundationdb/pull/6870>`_
* Fixed a client-side infinite loop caused by the provisional GRV Proxy ID not being set in GetReadVersionReply. `(PR #6849) <https://github.com/apple/foundationdb/pull/6849>`_

7.1.1
=====
* Added new C libraries to the client package. `(PR #6828) <https://github.com/apple/foundationdb/pull/6828>`_

7.1.0
=====

@ -13,8 +13,6 @@ Overview

A tenant in a FoundationDB cluster maps a byte-string name to a key-space that can be used to store data associated with that tenant. This key-space is stored in the cluster's global key-space under a prefix assigned to that tenant, with each tenant being assigned a separate non-intersecting prefix.

In addition to each being assigned a separate tenant prefix, tenants can be configured to have a common shared prefix. By default, the shared prefix is empty and tenants are allocated prefixes throughout the normal key-space. To configure an alternate shared prefix, set the ``\xff/tenantDataPrefix`` key to have the desired prefix as the value.

Tenant operations are implicitly confined to the key-space associated with the tenant. It is not necessary for client applications to use or be aware of the prefix assigned to the tenant.

Enabling tenants
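
As a sketch of what that confinement means for a client, here is the rough shape in Go; the tenant surface (`OpenTenant` and the `Tenant.Transact` method) is an assumption about the binding available in this era, so check the client library you actually ship with. Keys are written relative to the tenant and the prefix never appears in application code.

package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

func main() {
	fdb.MustAPIVersion(710)
	db := fdb.MustOpenDefault()

	// Assumed binding surface: open a handle confined to tenant "t1".
	tenant, err := db.OpenTenant(fdb.Key("t1"))
	if err != nil {
		panic(err)
	}

	// The key is "hello" from the application's point of view; the tenant
	// prefix is applied transparently by the client.
	_, err = tenant.Transact(func(tr fdb.Transaction) (interface{}, error) {
		tr.Set(fdb.Key("hello"), []byte("world"))
		return nil, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("wrote hello under tenant t1")
}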

@ -65,9 +65,7 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
	{ OPT_INPUT_FILE, "-i", SO_REQ_SEP },
	{ OPT_INPUT_FILE, "--input", SO_REQ_SEP },
	{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	TLS_OPTION_FLAGS,
	{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
	{ OPT_LIST_ONLY, "--list-only", SO_NONE },
	{ OPT_KEY_PREFIX, "-k", SO_REQ_SEP },

@ -75,10 +75,7 @@ void printDecodeUsage() {
	" --crash Crash on serious error.\n"
	" --blob-credentials FILE\n"
	" File containing blob credentials in JSON format.\n"
	" The same credential format/file fdbbackup uses.\n"
#ifndef TLS_DISABLED
	TLS_HELP
#endif
	" The same credential format/file fdbbackup uses.\n" TLS_HELP
	" --build-flags Print build information and exit.\n"
	" --list-only Print file list and exit.\n"
	" -k KEY_PREFIX Use the prefix for filtering mutations\n"

@ -302,7 +299,6 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
		param->save_file_locally = true;
		break;

#ifndef TLS_DISABLED
	case TLSConfig::OPT_TLS_PLUGIN:
		args->OptionArg();
		break;

@ -326,7 +322,6 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
	case TLSConfig::OPT_TLS_VERIFY_PEERS:
		param->tlsConfig.tlsVerifyPeers = args->OptionArg();
		break;
#endif

	case OPT_BUILD_FLAGS:
		printBuildInformation();

@ -220,10 +220,8 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupStartOptions[] = {

@ -269,10 +267,8 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
	{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
	{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
	{ OPT_ENCRYPTION_KEY_FILE, "--encryption-key-file", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupModifyOptions[] = {

@ -335,10 +331,8 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_JSON, "--json", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupAbortOptions[] = {

@ -364,10 +358,8 @@ CSimpleOpt::SOption g_rgBackupAbortOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupCleanupOptions[] = {

@ -393,10 +385,8 @@ CSimpleOpt::SOption g_rgBackupCleanupOptions[] = {
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
	{ OPT_DELETE_DATA, "--delete-data", SO_NONE },
	{ OPT_MIN_CLEANUP_SECONDS, "--min-cleanup-seconds", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {

@ -424,10 +414,8 @@ CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupWaitOptions[] = {

@ -455,10 +443,8 @@ CSimpleOpt::SOption g_rgBackupWaitOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupPauseOptions[] = {

@ -482,10 +468,8 @@ CSimpleOpt::SOption g_rgBackupPauseOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupExpireOptions[] = {

@ -521,10 +505,8 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
	{ OPT_EXPIRE_BEFORE_DATETIME, "--expire-before-timestamp", SO_REQ_SEP },
	{ OPT_EXPIRE_MIN_RESTORABLE_DAYS, "--min-restorable-days", SO_REQ_SEP },
	{ OPT_EXPIRE_DELETE_BEFORE_DAYS, "--delete-before-days", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {

@ -550,10 +532,8 @@ CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {

@ -584,10 +564,8 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
	{ OPT_DESCRIBE_DEEP, "--deep", SO_NONE },
	{ OPT_DESCRIBE_TIMESTAMPS, "--version-timestamps", SO_NONE },
	{ OPT_JSON, "--json", SO_NONE },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupDumpOptions[] = {

@ -616,10 +594,8 @@ CSimpleOpt::SOption g_rgBackupDumpOptions[] = {
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
	{ OPT_DUMP_BEGIN, "--begin", SO_REQ_SEP },
	{ OPT_DUMP_END, "--end", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupTagsOptions[] = {

@ -634,10 +610,8 @@ CSimpleOpt::SOption g_rgBackupTagsOptions[] = {
	{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
	{ OPT_QUIET, "-q", SO_NONE },
	{ OPT_QUIET, "--quiet", SO_NONE },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupListOptions[] = {

@ -646,6 +620,7 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
#endif
	{ OPT_BASEURL, "-b", SO_REQ_SEP },
	{ OPT_BASEURL, "--base-url", SO_REQ_SEP },
	{ OPT_PROXY, "--proxy", SO_REQ_SEP },
	{ OPT_TRACE, "--log", SO_NONE },
	{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
	{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },

@ -662,10 +637,8 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupQueryOptions[] = {

@ -698,10 +671,8 @@ CSimpleOpt::SOption g_rgBackupQueryOptions[] = {
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

// g_rgRestoreOptions is used by fdbrestore and fastrestore_tool

@ -747,10 +718,8 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
	{ OPT_RESTORE_BEGIN_VERSION, "--begin-version", SO_REQ_SEP },
	{ OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY, "--inconsistent-snapshot-only", SO_NONE },
	{ OPT_ENCRYPTION_KEY_FILE, "--encryption-key-file", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgDBAgentOptions[] = {

@ -780,10 +749,8 @@ CSimpleOpt::SOption g_rgDBAgentOptions[] = {
	{ OPT_HELP, "-h", SO_NONE },
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgDBStartOptions[] = {

@ -813,10 +780,8 @@ CSimpleOpt::SOption g_rgDBStartOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgDBStatusOptions[] = {

@ -846,10 +811,8 @@ CSimpleOpt::SOption g_rgDBStatusOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgDBSwitchOptions[] = {

@ -878,10 +841,8 @@ CSimpleOpt::SOption g_rgDBSwitchOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgDBAbortOptions[] = {

@ -911,10 +872,8 @@ CSimpleOpt::SOption g_rgDBAbortOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgDBPauseOptions[] = {

@ -940,10 +899,8 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
	{ OPT_HELP, "--help", SO_NONE },
	{ OPT_DEVHELP, "--dev-help", SO_NONE },
	{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif
	SO_END_OF_OPTIONS
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS
};

const KeyRef exeAgent = LiteralStringRef("backup_agent");

@ -1017,9 +974,7 @@ static void printAgentUsage(bool devhelp) {
	printf(" -m SIZE, --memory SIZE\n"
	       " Memory limit. The default value is 8GiB. When specified\n"
	       " without a unit, MiB is assumed.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif
	printf(" --build-flags Print build information and exit.\n");
	printf(" -v, --version Print version information and exit.\n");
	printf(" -h, --help Display this help and exit.\n");

@ -1147,9 +1102,7 @@ static void printBackupUsage(bool devhelp) {
	       "and ignore the range files.\n");
	printf(" --encryption-key-file"
	       " The AES-128-GCM key in the provided file is used for encrypting backup files.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif
	printf(" -w, --wait Wait for the backup to complete (allowed with `start' and `discontinue').\n");
	printf(" -z, --no-stop-when-done\n"
	       " Do not stop backup when restorable.\n");

@ -1222,9 +1175,7 @@ static void printRestoreUsage(bool devhelp) {
	       "instead of the entire set.\n");
	printf(" --encryption-key-file"
	       " The AES-128-GCM key in the provided file is used for decrypting backup files.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif
	printf(" -v DBVERSION The version at which the database will be restored.\n");
	printf(" --timestamp Instead of a numeric version, use this to specify a timestamp in %s\n",
	       BackupAgentBase::timeFormat().c_str());

@ -1281,9 +1232,7 @@ static void printDBAgentUsage(bool devhelp) {
	printf(" -m, --memory SIZE\n"
	       " Memory limit. The default value is 8GiB. When specified\n"
	       " without a unit, MiB is assumed.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif
	printf(" --build-flags Print build information and exit.\n");
	printf(" -v, --version Print version information and exit.\n");
	printf(" -h, --help Display this help and exit.\n");

@ -1322,9 +1271,7 @@ static void printDBBackupUsage(bool devhelp) {
	       " If not specified, the entire database will be backed up.\n");
	printf(" --cleanup Abort will attempt to stop mutation logging on the source cluster.\n");
	printf(" --dstonly Abort will not make any changes on the source cluster.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif
	printf(" --log Enables trace file logging for the CLI session.\n"
	       " --logdir PATH Specifies the output directory for trace files. If\n"
	       " unspecified, defaults to the current directory. Has\n"

@ -3390,6 +3337,10 @@ int main(int argc, char* argv[]) {
	}

	Optional<std::string> proxy;
	std::string p;
	if (platform::getEnvironmentVar("HTTP_PROXY", p) || platform::getEnvironmentVar("HTTPS_PROXY", p)) {
		proxy = p;
	}
	std::string destinationContainer;
	bool describeDeep = false;
	bool describeTimestamps = false;

@ -3793,7 +3744,6 @@ int main(int argc, char* argv[]) {
	case OPT_BLOB_CREDENTIALS:
		tlsConfig.blobCredentials.push_back(args->OptionArg());
		break;
#ifndef TLS_DISABLED
	case TLSConfig::OPT_TLS_PLUGIN:
		args->OptionArg();
		break;

@ -3812,7 +3762,6 @@ int main(int argc, char* argv[]) {
	case TLSConfig::OPT_TLS_VERIFY_PEERS:
		tlsConfig.tlsVerifyPeers = args->OptionArg();
		break;
#endif
	case OPT_DUMP_BEGIN:
		dumpBegin = parseVersion(args->OptionArg());
		break;

@ -279,6 +279,36 @@ ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
	return ret;
}

void configureGenerator(const char* text,
                        const char* line,
                        std::vector<std::string>& lc,
                        std::vector<StringRef> const& tokens) {
	const char* opts[] = { "new",
	                       "single",
	                       "double",
	                       "triple",
	                       "three_data_hall",
	                       "three_datacenter",
	                       "ssd",
	                       "ssd-1",
	                       "ssd-2",
	                       "memory",
	                       "memory-1",
	                       "memory-2",
	                       "memory-radixtree-beta",
	                       "commit_proxies=",
	                       "grv_proxies=",
	                       "logs=",
	                       "resolvers=",
	                       "perpetual_storage_wiggle=",
	                       "perpetual_storage_wiggle_locality=",
	                       "storage_migration_type=",
	                       "tenant_mode=",
	                       "blob_granules_enabled=",
	                       nullptr };
	arrayGenerator(text, line, opts, lc);
}

CommandFactory configureFactory(
    "configure",
    CommandHelp(

@ -322,6 +352,7 @@ CommandFactory configureFactory(
        "optional, then transactions can be run with or without specifying tenants. If required, all data must be "
        "accessed using tenants.\n\n"

        "See the FoundationDB Administration Guide for more information."));
        "See the FoundationDB Administration Guide for more information."),
    &configureGenerator);

} // namespace fdb_cli

@ -61,13 +61,28 @@ ACTOR Future<Void> setDDMode(Reference<IDatabase> db, int mode) {
	}
}

ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Reference<IDatabase> db, bool ignoreRebalance) {
ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Reference<IDatabase> db, uint8_t DDIgnoreOptionMask, bool setMaskedBit) {
	state Reference<ITransaction> tr = db->createTransaction();
	loop {
		tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
		tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
		try {
			if (ignoreRebalance) {
				tr->set(fdb_cli::ddIgnoreRebalanceSpecialKey, ValueRef());
			state ThreadFuture<Optional<Value>> resultFuture = tr->get(rebalanceDDIgnoreKey);
			Optional<Value> v = wait(safeThreadFutureToFuture(resultFuture));
			uint8_t oldValue = DDIgnore::NONE; // nothing is disabled
			if (v.present()) {
				if (v.get().size() > 0) {
					oldValue = BinaryReader::fromStringRef<uint8_t>(v.get(), Unversioned());
				} else {
					// In old versions (<= 7.1), the value is an empty string, which means all DD rebalance
					// functions are disabled
					oldValue = DDIgnore::ALL;
				}
				// printf("oldValue: %d Mask: %d V:%d\n", oldValue, DDIgnoreOptionMask, v.get().size());
			}
			uint8_t newValue = setMaskedBit ? (oldValue | DDIgnoreOptionMask) : (oldValue & ~DDIgnoreOptionMask);
			if (newValue > 0) {
				tr->set(fdb_cli::ddIgnoreRebalanceSpecialKey, BinaryWriter::toValue(newValue, Unversioned()));
			} else {
				tr->clear(fdb_cli::ddIgnoreRebalanceSpecialKey);
			}
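
The bit-mask bookkeeping above is easy to misread in ACTOR form, so here is the same update rule as a small Go sketch. The constant names and bit positions are stand-ins for the `DDIgnore` enum in the C++ above, and the legacy empty-value case is called out explicitly:

package main

import "fmt"

// Stand-ins for the DDIgnore enum referenced above (bit positions assumed).
const (
	ddIgnoreNone          uint8 = 0
	ddIgnoreRebalanceDisk uint8 = 1 << 0
	ddIgnoreRebalanceRead uint8 = 1 << 1
	ddIgnoreAll           uint8 = ddIgnoreRebalanceDisk | ddIgnoreRebalanceRead
)

// nextIgnoreValue applies one enable/disable request to the stored value.
// present/stored model the three states of the system key: absent, an empty
// string written by <= 7.1 (meaning "everything disabled"), or a packed uint8.
func nextIgnoreValue(stored []byte, present bool, mask uint8, set bool) uint8 {
	old := ddIgnoreNone
	if present {
		if len(stored) > 0 {
			old = stored[0] // the BinaryReader::fromStringRef<uint8_t> case
		} else {
			old = ddIgnoreAll // legacy empty value: all rebalance disabled
		}
	}
	if set {
		return old | mask // set the masked bit(s)
	}
	return old &^ mask // clear the masked bit(s)
}

func main() {
	// Upgrade path: a pre-7.1 empty value, then re-enabling disk rebalance only.
	v := nextIgnoreValue([]byte{}, true, ddIgnoreRebalanceDisk, false)
	fmt.Printf("%02x\n", v) // leaves only the read-rebalance bit set
}

A value of zero maps to clearing the key entirely, which is why the C++ above calls `tr->clear` rather than writing a zero byte.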

@ -79,18 +94,29 @@ ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Reference<IDatabase> db, bool igno
	}
}

// set masked bit
Future<Void> setDDIgnoreRebalanceOn(Reference<IDatabase> db, uint8_t DDIgnoreOptionMask) {
	return setDDIgnoreRebalanceSwitch(db, DDIgnoreOptionMask, true);
}

// reset masked bit
Future<Void> setDDIgnoreRebalanceOff(Reference<IDatabase> db, uint8_t DDIgnoreOptionMask) {
	return setDDIgnoreRebalanceSwitch(db, DDIgnoreOptionMask, false);
}

} // namespace

namespace fdb_cli {

const KeyRef ddModeSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/mode");
const KeyRef ddIgnoreRebalanceSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/rebalance_ignored");

constexpr auto usage =
    "Usage: datadistribution <on|off|disable <ssfailure|rebalance|rebalance_disk|rebalance_read>|enable "
    "<ssfailure|rebalance|rebalance_disk|rebalance_read>>\n";
ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
	state bool result = true;
	if (tokens.size() != 2 && tokens.size() != 3) {
		printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
		       "<ssfailure|rebalance>>\n");
		printf(usage);
		result = false;
	} else {
		if (tokencmp(tokens[1], "on")) {

@ -104,11 +130,16 @@ ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::ve
			wait(success((setHealthyZone(db, LiteralStringRef("IgnoreSSFailures"), 0))));
			printf("Data distribution is disabled for storage server failures.\n");
		} else if (tokencmp(tokens[2], "rebalance")) {
			wait(setDDIgnoreRebalanceSwitch(db, true));
			wait(setDDIgnoreRebalanceOn(db, DDIgnore::REBALANCE_DISK | DDIgnore::REBALANCE_READ));
			printf("Data distribution is disabled for rebalance.\n");
		} else if (tokencmp(tokens[2], "rebalance_disk")) {
			wait(setDDIgnoreRebalanceOn(db, DDIgnore::REBALANCE_DISK));
			printf("Data distribution is disabled for rebalance_disk.\n");
		} else if (tokencmp(tokens[2], "rebalance_read")) {
			wait(setDDIgnoreRebalanceOn(db, DDIgnore::REBALANCE_READ));
			printf("Data distribution is disabled for rebalance_read.\n");
		} else {
			printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
			       "<ssfailure|rebalance>>\n");
			printf(usage);
			result = false;
		}
	} else if (tokencmp(tokens[1], "enable")) {

@ -116,16 +147,20 @@ ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::ve
			wait(success((clearHealthyZone(db, false, true))));
			printf("Data distribution is enabled for storage server failures.\n");
		} else if (tokencmp(tokens[2], "rebalance")) {
			wait(setDDIgnoreRebalanceSwitch(db, false));
			wait(setDDIgnoreRebalanceOff(db, DDIgnore::REBALANCE_DISK | DDIgnore::REBALANCE_READ));
			printf("Data distribution is enabled for rebalance.\n");
		} else if (tokencmp(tokens[2], "rebalance_disk")) {
			wait(setDDIgnoreRebalanceOff(db, DDIgnore::REBALANCE_DISK));
			printf("Data distribution is enabled for rebalance_disk.\n");
		} else if (tokencmp(tokens[2], "rebalance_read")) {
			wait(setDDIgnoreRebalanceOff(db, DDIgnore::REBALANCE_READ));
			printf("Data distribution is enabled for rebalance_read.\n");
		} else {
			printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
			       "<ssfailure|rebalance>>\n");
			printf(usage);
			result = false;
		}
	} else {
		printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
		       "<ssfailure|rebalance>>\n");
		printf(usage);
		result = false;
	}
}

@ -18,6 +18,8 @@
 * limitations under the License.
 */

#include "boost/algorithm/string.hpp"

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"

@ -40,8 +42,10 @@ ACTOR Future<bool> expensiveDataCheckCommandActor(
    std::vector<StringRef> tokens,
    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
	state bool result = true;
	state std::string addressesStr;
	if (tokens.size() == 1) {
		// initialize worker interfaces
		address_interface->clear();
		wait(getWorkerInterfaces(tr, address_interface));
	}
	if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {

@ -57,20 +61,26 @@ ACTOR Future<bool> expensiveDataCheckCommandActor(
		}
		printf("\n");
	} else if (tokencmp(tokens[1], "all")) {
		state std::map<Key, std::pair<Value, ClientLeaderRegInterface>>::const_iterator it;
		for (it = address_interface->cbegin(); it != address_interface->cend(); it++) {
			int64_t checkRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(it->first, true, 0)));
			if (!checkRequestSent) {
				result = false;
				fprintf(stderr, "ERROR: failed to send request to check process `%s'.\n", it->first.toString().c_str());
			}
		}
		if (address_interface->size() == 0) {
			fprintf(stderr,
			        "ERROR: no processes to check. You must run the `expensive_data_check’ "
			        "command before running `expensive_data_check all’.\n");
		} else {
			printf("Attempted to kill and check %zu processes\n", address_interface->size());
			std::vector<std::string> addressesVec;
			for (const auto& [address, _] : *address_interface) {
				addressesVec.push_back(address.toString());
			}
			addressesStr = boost::algorithm::join(addressesVec, ",");
			// make sure we only call the interface once to send requests in parallel
			int64_t checkRequestsSent = wait(safeThreadFutureToFuture(db->rebootWorker(addressesStr, true, 0)));
			if (!checkRequestsSent) {
				result = false;
				fprintf(stderr,
				        "ERROR: failed to send requests to check all processes, please run the `expensive_data_check’ "
				        "command again to fetch latest addresses.\n");
			} else {
				printf("Attempted to kill and check %zu processes\n", address_interface->size());
			}
		}
	} else {
		state int i;

@ -83,15 +93,21 @@ ACTOR Future<bool> expensiveDataCheckCommandActor(
	}

	if (result) {
		std::vector<std::string> addressesVec;
		for (i = 1; i < tokens.size(); i++) {
			int64_t checkRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], true, 0)));
			if (!checkRequestSent) {
				result = false;
				fprintf(
				    stderr, "ERROR: failed to send request to check process `%s'.\n", tokens[i].toString().c_str());
			}
			addressesVec.push_back(tokens[i].toString());
		}
		addressesStr = boost::algorithm::join(addressesVec, ",");
		int64_t checkRequestsSent = wait(safeThreadFutureToFuture(db->rebootWorker(addressesStr, true, 0)));
		if (!checkRequestsSent) {
			result = false;
			fprintf(stderr,
			        "ERROR: failed to send requests to check processes `%s', please run the `expensive_data_check’ "
			        "command again to fetch latest addresses.\n",
			        addressesStr.c_str());
		} else {
			printf("Attempted to kill and check %zu processes\n", tokens.size() - 1);
		}
		printf("Attempted to kill and check %zu processes\n", tokens.size() - 1);
	}
}
return result;

@ -18,6 +18,8 @@
 * limitations under the License.
 */

#include "boost/algorithm/string.hpp"

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"

@ -37,8 +39,10 @@ ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
	ASSERT(tokens.size() >= 1);
	state bool result = true;
	state std::string addressesStr;
	if (tokens.size() == 1) {
		// initialize worker interfaces
		address_interface->clear();
		wait(getWorkerInterfaces(tr, address_interface));
	}
	if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {

@ -54,21 +58,27 @@ ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
		}
		printf("\n");
	} else if (tokencmp(tokens[1], "all")) {
		state std::map<Key, std::pair<Value, ClientLeaderRegInterface>>::const_iterator it;
		for (it = address_interface->cbegin(); it != address_interface->cend(); it++) {
			int64_t killRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(it->first, false, 0)));
			if (!killRequestSent) {
				result = false;
				fprintf(stderr, "ERROR: failed to send request to kill process `%s'.\n", it->first.toString().c_str());
			}
		}
		if (address_interface->size() == 0) {
			result = false;
			fprintf(stderr,
			        "ERROR: no processes to kill. You must run the `kill’ command before "
			        "running `kill all’.\n");
		} else {
			printf("Attempted to kill %zu processes\n", address_interface->size());
			std::vector<std::string> addressesVec;
			for (const auto& [address, _] : *address_interface) {
				addressesVec.push_back(address.toString());
			}
			addressesStr = boost::algorithm::join(addressesVec, ",");
			// make sure we only call the interface once to send requests in parallel
			int64_t killRequestsSent = wait(safeThreadFutureToFuture(db->rebootWorker(addressesStr, false, 0)));
			if (!killRequestsSent) {
				result = false;
				fprintf(stderr,
				        "ERROR: failed to send requests to all processes, please run the `kill’ command again to fetch "
				        "latest addresses.\n");
			} else {
				printf("Attempted to kill %zu processes\n", address_interface->size());
			}
		}
	} else {
		state int i;

@ -81,20 +91,34 @@ ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
	}

	if (result) {
		std::vector<std::string> addressesVec;
		for (i = 1; i < tokens.size(); i++) {
			int64_t killRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], false, 0)));
			if (!killRequestSent) {
				result = false;
				fprintf(
				    stderr, "ERROR: failed to send request to kill process `%s'.\n", tokens[i].toString().c_str());
			}
			addressesVec.push_back(tokens[i].toString());
		}
		addressesStr = boost::algorithm::join(addressesVec, ",");
		int64_t killRequestsSent = wait(safeThreadFutureToFuture(db->rebootWorker(addressesStr, false, 0)));
		if (!killRequestsSent) {
			result = false;
			fprintf(stderr,
			        "ERROR: failed to send requests to kill processes `%s', please run the `kill’ command again to "
			        "fetch latest addresses.\n",
			        addressesStr.c_str());
		} else {
			printf("Attempted to kill %zu processes\n", tokens.size() - 1);
		}
		printf("Attempted to kill %zu processes\n", tokens.size() - 1);
	}
}
return result;
}

void killGenerator(const char* text,
                   const char* line,
                   std::vector<std::string>& lc,
                   std::vector<StringRef> const& tokens) {
	const char* opts[] = { "all", "list", nullptr };
	arrayGenerator(text, line, opts, lc);
}

CommandFactory killFactory(
    "kill",
    CommandHelp(

@ -103,5 +127,6 @@ CommandFactory killFactory(
        "If no addresses are specified, populates the list of processes which can be killed. Processes cannot be "
        "killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known "
        "processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is "
        "unresponsive.\n\nFor each IP:port pair in <ADDRESS ...>, attempt to kill the specified process."));
        "unresponsive.\n\nFor each IP:port pair in <ADDRESS ...>, attempt to kill the specified process."),
    &killGenerator);
} // namespace fdb_cli

@ -35,7 +35,10 @@

namespace fdb_cli {

ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens, bool intrans) {
ACTOR Future<bool> profileCommandActor(Database db,
                                       Reference<ITransaction> tr,
                                       std::vector<StringRef> tokens,
                                       bool intrans) {
	state bool result = true;
	if (tokens.size() == 1) {
		printUsage(tokens[0]);

@ -45,7 +48,7 @@ ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<S
		fprintf(stderr, "ERROR: Usage: profile client <get|set>\n");
		return false;
	}
	wait(GlobalConfig::globalConfig().onInitialized());
	wait(db->globalConfig->onInitialized());
	if (tokencmp(tokens[2], "get")) {
		if (tokens.size() != 3) {
			fprintf(stderr, "ERROR: Additional arguments to `get` are not supported.\n");

@ -53,12 +56,12 @@ ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<S
		}
		std::string sampleRateStr = "default";
		std::string sizeLimitStr = "default";
		const double sampleRateDbl = GlobalConfig::globalConfig().get<double>(
		    fdbClientInfoTxnSampleRate, std::numeric_limits<double>::infinity());
		const double sampleRateDbl =
		    db->globalConfig->get<double>(fdbClientInfoTxnSampleRate, std::numeric_limits<double>::infinity());
		if (!std::isinf(sampleRateDbl)) {
			sampleRateStr = std::to_string(sampleRateDbl);
		}
		const int64_t sizeLimit = GlobalConfig::globalConfig().get<int64_t>(fdbClientInfoTxnSizeLimit, -1);
		const int64_t sizeLimit = db->globalConfig->get<int64_t>(fdbClientInfoTxnSizeLimit, -1);
		if (sizeLimit != -1) {
			sizeLimitStr = boost::lexical_cast<std::string>(sizeLimit);
		}

@ -1128,8 +1128,12 @@ void printStatus(StatusObjectReader statusObj,
				                "storage server failures.";
			}
			if (statusObjCluster.has("data_distribution_disabled_for_rebalance")) {
				outputString += "\n\nWARNING: Data distribution is currently turned on but shard size balancing is "
				                "currently disabled.";
				outputString += "\n\nWARNING: Data distribution is currently turned on but one or both of shard "
				                "size and read-load based balancing are disabled.";
				// data_distribution_disabled_hex
				if (statusObjCluster.has("data_distribution_disabled_hex")) {
					outputString += " Ignore code: " + statusObjCluster["data_distribution_disabled_hex"].get_str();
				}
			}
		}

@ -1246,6 +1250,16 @@ ACTOR Future<bool> statusCommandActor(Reference<IDatabase> db,
	return true;
}

void statusGenerator(const char* text,
                     const char* line,
                     std::vector<std::string>& lc,
                     std::vector<StringRef> const& tokens) {
	if (tokens.size() == 1) {
		const char* opts[] = { "minimal", "details", "json", nullptr };
		arrayGenerator(text, line, opts, lc);
	}
}

CommandFactory statusFactory(
    "status",
    CommandHelp("status [minimal|details|json]",

@ -1254,5 +1268,6 @@ CommandFactory statusFactory(
                "what is wrong. If the cluster is running, this command will print cluster "
                "statistics.\n\nSpecifying `minimal' will provide a minimal description of the status of your "
                "database.\n\nSpecifying `details' will provide load information for individual "
                "workers.\n\nSpecifying `json' will provide status information in a machine readable JSON format."));
                "workers.\n\nSpecifying `json' will provide status information in a machine readable JSON format."),
    &statusGenerator);
} // namespace fdb_cli

@ -18,6 +18,8 @@
 * limitations under the License.
 */

#include "boost/algorithm/string.hpp"

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"

@ -37,8 +39,10 @@ ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
	ASSERT(tokens.size() >= 1);
	state bool result = true;
	state std::string addressesStr;
	if (tokens.size() == 1) {
		// initialize worker interfaces
		address_interface->clear();
		wait(getWorkerInterfaces(tr, address_interface));
		if (address_interface->size() == 0) {
			printf("\nNo addresses can be suspended.\n");

@ -72,19 +76,23 @@ ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
			printUsage(tokens[0]);
			result = false;
		} else {
			int64_t timeout_ms = seconds * 1000;
			tr->setOption(FDBTransactionOptions::TIMEOUT, StringRef((uint8_t*)&timeout_ms, sizeof(int64_t)));
			std::vector<std::string> addressesVec;
			for (i = 2; i < tokens.size(); i++) {
				int64_t suspendRequestSent =
				    wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], false, static_cast<int>(seconds))));
				if (!suspendRequestSent) {
					result = false;
					fprintf(stderr,
					        "ERROR: failed to send request to suspend process `%s'.\n",
					        tokens[i].toString().c_str());
				}
				addressesVec.push_back(tokens[i].toString());
			}
			addressesStr = boost::algorithm::join(addressesVec, ",");
			int64_t suspendRequestSent =
			    wait(safeThreadFutureToFuture(db->rebootWorker(addressesStr, false, static_cast<int>(seconds))));
			if (!suspendRequestSent) {
				result = false;
				fprintf(
				    stderr,
				    "ERROR: failed to send requests to suspend processes `%s', please run the `suspend’ command "
				    "to fetch latest addresses.\n",
				    addressesStr.c_str());
			} else {
				printf("Attempted to suspend %zu processes\n", tokens.size() - 2);
			}
			printf("Attempted to suspend %zu processes\n", tokens.size() - 2);
		}
	}
}
@ -310,10 +310,104 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
|
|||
return true;
|
||||
}
|
||||
|
||||
void throttleGenerator(const char* text,
|
||||
const char* line,
|
||||
std::vector<std::string>& lc,
|
||||
std::vector<StringRef> const& tokens) {
|
||||
if (tokens.size() == 1) {
|
||||
const char* opts[] = { "on tag", "off", "enable auto", "disable auto", "list", nullptr };
|
||||
arrayGenerator(text, line, opts, lc);
|
||||
} else if (tokens.size() >= 2 && tokencmp(tokens[1], "on")) {
|
||||
if (tokens.size() == 2) {
|
||||
const char* opts[] = { "tag", nullptr };
|
||||
arrayGenerator(text, line, opts, lc);
|
||||
} else if (tokens.size() == 6) {
|
||||
const char* opts[] = { "default", "immediate", "batch", nullptr };
|
||||
arrayGenerator(text, line, opts, lc);
|
||||
}
|
||||
} else if (tokens.size() >= 2 && tokencmp(tokens[1], "off") && !tokencmp(tokens[tokens.size() - 1], "tag")) {
|
||||
		const char* opts[] = { "all", "auto", "manual", "tag", "default", "immediate", "batch", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() == 2 && (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable"))) {
		const char* opts[] = { "auto", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "list")) {
		if (tokens.size() == 2) {
			const char* opts[] = { "throttled", "recommended", "all", nullptr };
			arrayGenerator(text, line, opts, lc);
		} else if (tokens.size() == 3) {
			const char* opts[] = { "LIMITS", nullptr };
			arrayGenerator(text, line, opts, lc);
		}
	}
}

std::vector<const char*> throttleHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
	if (tokens.size() == 1) {
		return { "<on|off|enable auto|disable auto|list>", "[ARGS]" };
	} else if (tokencmp(tokens[1], "on")) {
		std::vector<const char*> opts = { "tag", "<TAG>", "[RATE]", "[DURATION]", "[default|immediate|batch]" };
		if (tokens.size() == 2) {
			return opts;
		} else if (((tokens.size() == 3 && inArgument) || tokencmp(tokens[2], "tag")) && tokens.size() < 7) {
			return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
		}
	} else if (tokencmp(tokens[1], "off")) {
		if (tokencmp(tokens[tokens.size() - 1], "tag")) {
			return { "<TAG>" };
		} else {
			bool hasType = false;
			bool hasTag = false;
			bool hasPriority = false;
			for (int i = 2; i < tokens.size(); ++i) {
				if (tokencmp(tokens[i], "all") || tokencmp(tokens[i], "auto") || tokencmp(tokens[i], "manual")) {
					hasType = true;
				} else if (tokencmp(tokens[i], "default") || tokencmp(tokens[i], "immediate") ||
				           tokencmp(tokens[i], "batch")) {
					hasPriority = true;
				} else if (tokencmp(tokens[i], "tag")) {
					hasTag = true;
					++i;
				} else {
					return {};
				}
			}

			std::vector<const char*> options;
			if (!hasType) {
				options.push_back("[all|auto|manual]");
			}
			if (!hasTag) {
				options.push_back("[tag <TAG>]");
			}
			if (!hasPriority) {
				options.push_back("[default|immediate|batch]");
			}

			return options;
		}
	} else if ((tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) && tokens.size() == 2) {
		return { "auto" };
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "list")) {
		if (tokens.size() == 2) {
			return { "[throttled|recommended|all]", "[LIMITS]" };
		} else if (tokens.size() == 3 && (tokencmp(tokens[2], "throttled") || tokencmp(tokens[2], "recommended") ||
		                                  tokencmp(tokens[2], "all"))) {
			return { "[LIMITS]" };
		}
	} else if (tokens.size() == 2 && inArgument) {
		return { "[ARGS]" };
	}

	return std::vector<const char*>();
}

CommandFactory throttleFactory(
    "throttle",
    CommandHelp("throttle <on|off|enable auto|disable auto|list> [ARGS]",
                "view and control throttled tags",
                "Use `on' and `off' to manually throttle or unthrottle tags. Use `enable auto' or `disable auto' "
                "to enable or disable automatic tag throttling. Use `list' to print the list of throttled tags.\n"));
                "to enable or disable automatic tag throttling. Use `list' to print the list of throttled tags.\n"),
    &throttleGenerator,
    &throttleHintGenerator);
} // namespace fdb_cli
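This change replaces fdbcli's hard-coded per-command completion and hint logic with registration through CommandFactory. As a minimal sketch of the pattern (not code from this commit; the command name and its options are hypothetical, and only CommandFactory, CommandHelp, arrayGenerator, and tokencmp as they appear in this diff are assumed), a new command would register its generators alongside its help text:

// Hypothetical registration sketch for illustration only.
namespace fdb_cli {

void exampleGenerator(const char* text,
                      const char* line,
                      std::vector<std::string>& lc,
                      std::vector<StringRef> const& tokens) {
	if (tokens.size() == 1) {
		const char* opts[] = { "start", "stop", nullptr }; // hypothetical options
		arrayGenerator(text, line, opts, lc); // reuse the shared prefix matcher
	}
}

std::vector<const char*> exampleHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
	if (tokens.size() == 1) {
		return { "<start|stop>" };
	}
	return {};
}

CommandFactory exampleFactory("example",
                              CommandHelp("example <start|stop>", "hypothetical command", "Illustration only."),
                              &exampleGenerator,
                              &exampleHintGenerator);
} // namespace fdb_cli

With this in place, fdbcliCompCmd and the hint callback find the generators by command name, so no per-command branches are needed in fdbcli.actor.cpp itself.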
@@ -158,7 +158,7 @@ ACTOR Future<bool> versionEpochCommandActor(Reference<IDatabase> db, Database cx

CommandFactory versionEpochFactory(
    "versionepoch",
    CommandHelp("versionepoch [<enable|commit|set|disable> [EPOCH]]",
    CommandHelp("versionepoch [<enable|commit|get|set|disable> [EPOCH]]",
                "Read or write the version epoch",
                "If no arguments are specified, reports the offset between the expected version "
                "and the actual version. Otherwise, enables, disables, or commits the version epoch. "

@@ -125,12 +125,8 @@ CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
	{ OPT_DEBUG_TLS, "--debug-tls", SO_NONE },
	{ OPT_API_VERSION, "--api-version", SO_REQ_SEP },
	{ OPT_MEMORY, "--memory", SO_REQ_SEP },

#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif

	SO_END_OF_OPTIONS };
	TLS_OPTION_FLAGS,
	SO_END_OF_OPTIONS };

void printAtCol(const char* text, int col, FILE* stream = stdout) {
	const char* iter = text;

@@ -448,10 +444,7 @@ static void printProgramUsage(const char* name) {
	" --no-status Disables the initial status check done when starting\n"
	" the CLI.\n"
	" --api-version APIVERSION\n"
	" Specifies the version of the API for the CLI to use.\n"
#ifndef TLS_DISABLED
	TLS_HELP
#endif
	" Specifies the version of the API for the CLI to use.\n" TLS_HELP
	" --knob-KNOBNAME KNOBVALUE\n"
	" Changes a knob option. KNOBNAME should be lowercase.\n"
	" --debug-tls Prints the TLS configuration and certificate chain, then exits.\n"

@@ -758,6 +751,7 @@ void optionGenerator(const char* text, const char* line, std::vector<std::string
	}
}

namespace fdb_cli {
void arrayGenerator(const char* text, const char* line, const char** options, std::vector<std::string>& lc) {
	const char** iter = options;
	int len = strlen(text);

@@ -770,81 +764,13 @@ void arrayGenerator(const char* text, const char* line, const char** options, st
		}
	}
}
} // namespace fdb_cli

void onOffGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
	const char* opts[] = { "on", "off", nullptr };
	arrayGenerator(text, line, opts, lc);
}

void configureGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
	const char* opts[] = { "new",
	                       "single",
	                       "double",
	                       "triple",
	                       "three_data_hall",
	                       "three_datacenter",
	                       "ssd",
	                       "ssd-1",
	                       "ssd-2",
	                       "memory",
	                       "memory-1",
	                       "memory-2",
	                       "memory-radixtree-beta",
	                       "commit_proxies=",
	                       "grv_proxies=",
	                       "logs=",
	                       "resolvers=",
	                       "perpetual_storage_wiggle=",
	                       "perpetual_storage_wiggle_locality=",
	                       "storage_migration_type=",
	                       "tenant_mode=",
	                       "blob_granules_enabled=",
	                       nullptr };
	arrayGenerator(text, line, opts, lc);
}

void statusGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
	const char* opts[] = { "minimal", "details", "json", nullptr };
	arrayGenerator(text, line, opts, lc);
}

void killGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
	const char* opts[] = { "all", "list", nullptr };
	arrayGenerator(text, line, opts, lc);
}

void throttleGenerator(const char* text,
                       const char* line,
                       std::vector<std::string>& lc,
                       std::vector<StringRef> const& tokens) {
	if (tokens.size() == 1) {
		const char* opts[] = { "on tag", "off", "enable auto", "disable auto", "list", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "on")) {
		if (tokens.size() == 2) {
			const char* opts[] = { "tag", nullptr };
			arrayGenerator(text, line, opts, lc);
		} else if (tokens.size() == 6) {
			const char* opts[] = { "default", "immediate", "batch", nullptr };
			arrayGenerator(text, line, opts, lc);
		}
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "off") && !tokencmp(tokens[tokens.size() - 1], "tag")) {
		const char* opts[] = { "all", "auto", "manual", "tag", "default", "immediate", "batch", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() == 2 && (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable"))) {
		const char* opts[] = { "auto", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "list")) {
		if (tokens.size() == 2) {
			const char* opts[] = { "throttled", "recommended", "all", nullptr };
			arrayGenerator(text, line, opts, lc);
		} else if (tokens.size() == 3) {
			const char* opts[] = { "LIMITS", nullptr };
			arrayGenerator(text, line, opts, lc);
		}
	}
}

void fdbcliCompCmd(std::string const& text, std::vector<std::string>& lc) {
	bool err, partial;
	std::string whole_line = text;

@@ -856,7 +782,7 @@ void fdbcliCompCmd(std::string const& text, std::vector<std::string>& lc) {
	int count = tokens.size();

	// for(int i = 0; i < count; i++) {
	//     printf("Token (%d): `%s'\n", i, tokens[i].toString().c_str());
	//     printf("Token (%d): `%s'\n", i, tokens[i].toString().c_str());
	// }

	std::string ntext = "";

@@ -892,81 +818,10 @@ void fdbcliCompCmd(std::string const& text, std::vector<std::string>& lc) {
		onOffGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "configure")) {
		configureGenerator(ntext.c_str(), base_input.c_str(), lc);
	auto itr = CommandFactory::completionGenerators().find(tokens[0].toString());
	if (itr != CommandFactory::completionGenerators().end()) {
		itr->second(ntext.c_str(), base_input.c_str(), lc, tokens);
	}

	if (tokencmp(tokens[0], "status") && count == 1) {
		statusGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "kill") && count == 1) {
		killGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "throttle")) {
		throttleGenerator(ntext.c_str(), base_input.c_str(), lc, tokens);
	}
}

std::vector<const char*> throttleHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
	if (tokens.size() == 1) {
		return { "<on|off|enable auto|disable auto|list>", "[ARGS]" };
	} else if (tokencmp(tokens[1], "on")) {
		std::vector<const char*> opts = { "tag", "<TAG>", "[RATE]", "[DURATION]", "[default|immediate|batch]" };
		if (tokens.size() == 2) {
			return opts;
		} else if (((tokens.size() == 3 && inArgument) || tokencmp(tokens[2], "tag")) && tokens.size() < 7) {
			return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
		}
	} else if (tokencmp(tokens[1], "off")) {
		if (tokencmp(tokens[tokens.size() - 1], "tag")) {
			return { "<TAG>" };
		} else {
			bool hasType = false;
			bool hasTag = false;
			bool hasPriority = false;
			for (int i = 2; i < tokens.size(); ++i) {
				if (tokencmp(tokens[i], "all") || tokencmp(tokens[i], "auto") || tokencmp(tokens[i], "manual")) {
					hasType = true;
				} else if (tokencmp(tokens[i], "default") || tokencmp(tokens[i], "immediate") ||
				           tokencmp(tokens[i], "batch")) {
					hasPriority = true;
				} else if (tokencmp(tokens[i], "tag")) {
					hasTag = true;
					++i;
				} else {
					return {};
				}
			}

			std::vector<const char*> options;
			if (!hasType) {
				options.push_back("[all|auto|manual]");
			}
			if (!hasTag) {
				options.push_back("[tag <TAG>]");
			}
			if (!hasPriority) {
				options.push_back("[default|immediate|batch]");
			}

			return options;
		}
	} else if ((tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) && tokens.size() == 2) {
		return { "auto" };
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "list")) {
		if (tokens.size() == 2) {
			return { "[throttled|recommended|all]", "[LIMITS]" };
		} else if (tokens.size() == 3 && (tokencmp(tokens[2], "throttled") || tokencmp(tokens[2], "recommended") ||
		                                  tokencmp(tokens[2], "all"))) {
			return { "[LIMITS]" };
		}
	} else if (tokens.size() == 2 && inArgument) {
		return { "[ARGS]" };
	}

	return std::vector<const char*>();
}

void LogCommand(std::string line, UID randomID, std::string errMsg) {

@@ -1091,7 +946,6 @@ struct CLIOptions {
	case OPT_NO_HINTS:
		cliHints = false;

#ifndef TLS_DISABLED
	// TLS Options
	case TLSConfig::OPT_TLS_PLUGIN:
		args.OptionArg();

@@ -1111,7 +965,7 @@ struct CLIOptions {
	case TLSConfig::OPT_TLS_VERIFY_PEERS:
		tlsVerifyPeers = args.OptionArg();
		break;
#endif

	case OPT_HELP:
		printProgramUsage(program_name.c_str());
		return 0;

@@ -1158,6 +1012,36 @@ Future<T> stopNetworkAfter(Future<T> what) {
	}
}

ACTOR Future<Void> addInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                Reference<FlowLock> connectLock,
                                KeyValue kv) {
	wait(connectLock->take());
	state FlowLock::Releaser releaser(*connectLock);
	state ClientWorkerInterface workerInterf =
	    BinaryReader::fromStringRef<ClientWorkerInterface>(kv.value, IncludeVersion());
	state ClientLeaderRegInterface leaderInterf(workerInterf.address());
	choose {
		when(Optional<LeaderInfo> rep =
		         wait(brokenPromiseToNever(leaderInterf.getLeader.getReply(GetLeaderRequest())))) {
			StringRef ip_port =
			    (kv.key.endsWith(LiteralStringRef(":tls")) ? kv.key.removeSuffix(LiteralStringRef(":tls")) : kv.key)
			        .removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
			(*address_interface)[ip_port] = std::make_pair(kv.value, leaderInterf);

			if (workerInterf.reboot.getEndpoint().addresses.secondaryAddress.present()) {
				Key full_ip_port2 =
				    StringRef(workerInterf.reboot.getEndpoint().addresses.secondaryAddress.get().toString());
				StringRef ip_port2 = full_ip_port2.endsWith(LiteralStringRef(":tls"))
				                         ? full_ip_port2.removeSuffix(LiteralStringRef(":tls"))
				                         : full_ip_port2;
				(*address_interface)[ip_port2] = std::make_pair(kv.value, leaderInterf);
			}
		}
		when(wait(delay(CLIENT_KNOBS->CLI_CONNECT_TIMEOUT))) {}
	}
	return Void();
}

ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
	state LineNoise& linenoise = *plinenoise;
	state bool intrans = false;

@@ -1698,7 +1582,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {

		if (tokencmp(tokens[0], "profile")) {
			getTransaction(db, managementTenant, tr, options, intrans);
			bool _result = wait(makeInterruptable(profileCommandActor(tr, tokens, intrans)));
			bool _result = wait(makeInterruptable(profileCommandActor(localDb, tr, tokens, intrans)));
			if (!_result)
				is_error = true;
			continue;

@@ -2080,8 +1964,9 @@ ACTOR Future<int> runCli(CLIOptions opt) {

		bool inArgument = *(line.end() - 1) != ' ';
		std::string hintLine = inArgument ? " " : "";
		if (tokencmp(command, "throttle")) {
			std::vector<const char*> hintItems = throttleHintGenerator(parsed.back(), inArgument);
		auto itr = CommandFactory::hintGenerators().find(command.toString());
		if (itr != CommandFactory::hintGenerators().end()) {
			std::vector<const char*> hintItems = itr->second(parsed.back(), inArgument);
			if (hintItems.empty()) {
				return LineNoise::Hint();
			}

@@ -2224,7 +2109,6 @@ int main(int argc, char** argv) {
	}

	if (opt.debugTLS) {
#ifndef TLS_DISABLED
		// Backdoor into NativeAPI's tlsConfig, which is where the above network option settings ended up.
		extern TLSConfig tlsConfig;
		printf("TLS Configuration:\n");

@@ -2241,9 +2125,6 @@ int main(int argc, char** argv) {
			printf("Use --log and look at the trace logs for more detailed information on the failure.\n");
			return 1;
		}
#else
		printf("This fdbcli was built with TLS disabled.\n");
#endif
		return 0;
	}
@@ -47,8 +47,28 @@ struct CommandHelp {
	CommandHelp(const char* u, const char* s, const char* l) : usage(u), short_desc(s), long_desc(l) {}
};

void arrayGenerator(const char* text, const char* line, const char** options, std::vector<std::string>& lc);

struct CommandFactory {
	CommandFactory(const char* name, CommandHelp help) { commands()[name] = help; }
	typedef void (*CompletionGeneratorFunc)(const char* text,
	                                        const char* line,
	                                        std::vector<std::string>& lc,
	                                        std::vector<StringRef> const& tokens);

	typedef std::vector<const char*> (*HintGeneratorFunc)(std::vector<StringRef> const& tokens, bool inArgument);

	CommandFactory(const char* name,
	               CommandHelp help,
	               CompletionGeneratorFunc completionFunc = nullptr,
	               HintGeneratorFunc hintFunc = nullptr) {
		commands()[name] = help;
		if (completionFunc) {
			completionGenerators()[name] = completionFunc;
		}
		if (hintFunc) {
			hintGenerators()[name] = hintFunc;
		}
	}
	CommandFactory(const char* name) { hiddenCommands().insert(name); }
	static std::map<std::string, CommandHelp>& commands() {
		static std::map<std::string, CommandHelp> helpMap;

@@ -58,6 +78,14 @@ struct CommandFactory {
		static std::set<std::string> commands;
		return commands;
	}
	static std::map<std::string, CompletionGeneratorFunc>& completionGenerators() {
		static std::map<std::string, CompletionGeneratorFunc> completionMap;
		return completionMap;
	}
	static std::map<std::string, HintGeneratorFunc>& hintGenerators() {
		static std::map<std::string, HintGeneratorFunc> hintMap;
		return hintMap;
	}
};

// Special keys used by fdbcli commands

@@ -93,10 +121,7 @@ extern const KeyRangeRef processClassTypeSpecialKeyRange;
// Other special keys
inline const KeyRef errorMsgSpecialKey = LiteralStringRef("\xff\xff/error_message");
// help functions (Copied from fdbcli.actor.cpp)
// decode worker interfaces
ACTOR Future<Void> addInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                Reference<FlowLock> connectLock,
                                KeyValue kv);

// get all workers' info
ACTOR Future<bool> getWorkers(Reference<IDatabase> db, std::vector<ProcessData>* workers);

@@ -189,7 +214,10 @@ ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,
                                    bool clearSSFailureZoneString = false);
ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// profile command
ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens, bool intrans);
ACTOR Future<bool> profileCommandActor(Database db,
                                       Reference<ITransaction> tr,
                                       std::vector<StringRef> tokens,
                                       bool intrans);
// setclass command
ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// snapshot command
@@ -31,7 +31,7 @@
// Determine the public IP address by calling the first available coordinator.
// If connecting to all coordinators fails, throw bind_failed().
IPAddress determinePublicIPAutomatically(ClusterConnectionString& ccs) {
	int size = ccs.coordinators().size() + ccs.hostnames.size();
	int size = ccs.coords.size() + ccs.hostnames.size();
	int index = 0;
	loop {
		try {

@@ -42,10 +42,10 @@ IPAddress determinePublicIPAutomatically(ClusterConnectionString& ccs) {

			NetworkAddress coordAddr;
			// Try coords first, because they don't need to be resolved.
			if (index < ccs.coordinators().size()) {
				coordAddr = ccs.coordinators()[index];
			if (index < ccs.coords.size()) {
				coordAddr = ccs.coords[index];
			} else {
				Hostname& h = ccs.hostnames[index - ccs.coordinators().size()];
				Hostname& h = ccs.hostnames[index - ccs.coords.size()];
				Optional<NetworkAddress> resolvedAddr = h.resolveBlocking();
				if (!resolvedAddr.present()) {
					throw lookup_failed();
@@ -213,9 +213,7 @@ public:
	// Hack to get around the fact that macros don't work inside actor functions
	static Reference<IAsyncFile> encryptFile(Reference<IAsyncFile> const& f, AsyncFileEncrypted::Mode mode) {
		Reference<IAsyncFile> result = f;
#if ENCRYPTION_ENABLED
		result = makeReference<AsyncFileEncrypted>(result, mode);
#endif
		return result;
	}

@@ -1128,7 +1128,6 @@ public:
		return false;
	}

#if ENCRYPTION_ENABLED
	ACTOR static Future<Void> createTestEncryptionKeyFile(std::string filename) {
		state Reference<IAsyncFile> keyFile = wait(IAsyncFileSystem::filesystem()->open(
		    filename,

@@ -1164,7 +1163,6 @@ public:
		ASSERT_EQ(bytesRead, cipherKey->size());
		return Void();
	}
#endif // ENCRYPTION_ENABLED

}; // class BackupContainerFileSystemImpl

@@ -1481,19 +1479,11 @@ Future<Void> BackupContainerFileSystem::encryptionSetupComplete() const {

void BackupContainerFileSystem::setEncryptionKey(Optional<std::string> const& encryptionKeyFileName) {
	if (encryptionKeyFileName.present()) {
#if ENCRYPTION_ENABLED
		encryptionSetupFuture = BackupContainerFileSystemImpl::readEncryptionKey(encryptionKeyFileName.get());
#else
		encryptionSetupFuture = Void();
#endif
	}
}
Future<Void> BackupContainerFileSystem::createTestEncryptionKeyFile(std::string const& filename) {
#if ENCRYPTION_ENABLED
	return BackupContainerFileSystemImpl::createTestEncryptionKeyFile(filename);
#else
	return Void();
#endif
}

// Get a BackupContainerFileSystem based on a container URL string

@@ -20,9 +20,7 @@

#include "fdbclient/AsyncFileS3BlobStore.actor.h"
#include "fdbclient/BackupContainerS3BlobStore.h"
#if (!defined(TLS_DISABLED) && !defined(_WIN32))
#include "fdbrpc/AsyncFileEncrypted.h"
#endif
#include "fdbrpc/AsyncFileReadAhead.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

@@ -174,11 +172,9 @@ std::string BackupContainerS3BlobStore::getURLFormat() {
Future<Reference<IAsyncFile>> BackupContainerS3BlobStore::readFile(const std::string& path) {
	Reference<IAsyncFile> f = makeReference<AsyncFileS3BlobStoreRead>(m_bstore, m_bucket, dataPath(path));

#if ENCRYPTION_ENABLED
	if (usesEncryption()) {
		f = makeReference<AsyncFileEncrypted>(f, AsyncFileEncrypted::Mode::READ_ONLY);
	}
#endif
	f = makeReference<AsyncFileReadAheadCache>(f,
	                                           m_bstore->knobs.read_block_size,
	                                           m_bstore->knobs.read_ahead_blocks,

@@ -194,11 +190,9 @@ Future<std::vector<std::string>> BackupContainerS3BlobStore::listURLs(Reference<

Future<Reference<IBackupFile>> BackupContainerS3BlobStore::writeFile(const std::string& path) {
	Reference<IAsyncFile> f = makeReference<AsyncFileS3BlobStoreWrite>(m_bstore, m_bucket, dataPath(path));
#if ENCRYPTION_ENABLED
	if (usesEncryption()) {
		f = makeReference<AsyncFileEncrypted>(f, AsyncFileEncrypted::Mode::APPEND_ONLY);
	}
#endif
	return Future<Reference<IBackupFile>>(makeReference<BackupContainerS3BlobStoreImpl::BackupFile>(path, f));
}

@@ -82,10 +82,11 @@ struct BlobGranuleChunkRef {
	Optional<BlobFilePointerRef> snapshotFile; // not set if it's an incremental read
	VectorRef<BlobFilePointerRef> deltaFiles;
	GranuleDeltas newDeltas;
	Optional<KeyRef> tenantPrefix;

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, keyRange, includedVersion, snapshotVersion, snapshotFile, deltaFiles, newDeltas);
		serializer(ar, keyRange, includedVersion, snapshotVersion, snapshotFile, deltaFiles, newDeltas, tenantPrefix);
	}
};

@@ -201,9 +201,15 @@ RangeResult materializeBlobGranule(const BlobGranuleChunkRef& chunk,
	Arena arena;
	std::map<KeyRef, ValueRef> dataMap;
	Version lastFileEndVersion = invalidVersion;
	KeyRange requestRange;
	if (chunk.tenantPrefix.present()) {
		requestRange = keyRange.withPrefix(chunk.tenantPrefix.get());
	} else {
		requestRange = keyRange;
	}

	if (snapshotData.present()) {
		Arena snapshotArena = loadSnapshotFile(snapshotData.get(), keyRange, dataMap);
		Arena snapshotArena = loadSnapshotFile(snapshotData.get(), requestRange, dataMap);
		arena.dependsOn(snapshotArena);
	}

@@ -211,18 +217,21 @@ RangeResult materializeBlobGranule(const BlobGranuleChunkRef& chunk,
		fmt::print("Applying {} delta files\n", chunk.deltaFiles.size());
	}
	for (int deltaIdx = 0; deltaIdx < chunk.deltaFiles.size(); deltaIdx++) {
		Arena deltaArena =
		    loadDeltaFile(deltaFileData[deltaIdx], keyRange, beginVersion, readVersion, lastFileEndVersion, dataMap);
		Arena deltaArena = loadDeltaFile(
		    deltaFileData[deltaIdx], requestRange, beginVersion, readVersion, lastFileEndVersion, dataMap);
		arena.dependsOn(deltaArena);
	}
	if (BG_READ_DEBUG) {
		fmt::print("Applying {} memory deltas\n", chunk.newDeltas.size());
	}
	applyDeltas(chunk.newDeltas, keyRange, beginVersion, readVersion, lastFileEndVersion, dataMap);
	applyDeltas(chunk.newDeltas, requestRange, beginVersion, readVersion, lastFileEndVersion, dataMap);

	RangeResult ret;
	for (auto& it : dataMap) {
		ret.push_back_deep(ret.arena(), KeyValueRef(it.first, it.second));
		ret.push_back_deep(
		    ret.arena(),
		    KeyValueRef(chunk.tenantPrefix.present() ? it.first.removePrefix(chunk.tenantPrefix.get()) : it.first,
		                it.second));
	}

	return ret;
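The tenant support added above widens the read range with chunk.tenantPrefix before loading snapshot and delta files, and strips the prefix again when building the result. A toy, self-contained illustration of that round-trip (plain std::string stands in for FDB's KeyRef helpers; the prefix bytes are made up):

#include <cassert>
#include <string>

int main() {
	std::string tenantPrefix = "tenant1/"; // hypothetical prefix bytes
	std::string userKey = "orders/42";

	std::string physicalKey = tenantPrefix + userKey;                  // keyRange.withPrefix(...)
	std::string returnedKey = physicalKey.substr(tenantPrefix.size()); // it.first.removePrefix(...)

	assert(returnedKey == userKey); // callers never observe the tenant prefix
	return 0;
}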
@@ -26,6 +26,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"
#include "fdbclient/StorageServerInterface.h" // for TenantInfo - should we refactor that elsewhere?

struct BlobWorkerInterface {
	constexpr static FileIdentifier file_identifier = 8358753;

@@ -104,13 +105,14 @@ struct BlobGranuleFileRequest {
	Version beginVersion = 0;
	Version readVersion;
	bool canCollapseBegin = true;
	TenantInfo tenantInfo;
	ReplyPromise<BlobGranuleFileReply> reply;

	BlobGranuleFileRequest() {}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, keyRange, beginVersion, readVersion, canCollapseBegin, reply, arena);
		serializer(ar, keyRange, beginVersion, readVersion, canCollapseBegin, tenantInfo, reply, arena);
	}
};

@@ -210,6 +210,7 @@ void ClientKnobs::initialize(Randomize randomize) {
	init( HTTP_VERBOSE_LEVEL, 0 );
	init( HTTP_REQUEST_ID_HEADER, "" );
	init( HTTP_REQUEST_AWS_V4_HEADER, true );
	init( HTTP_RESPONSE_SKIP_VERIFY_CHECKSUM_FOR_PARTIAL_CONTENT, false );
	init( BLOBSTORE_ENCRYPTION_TYPE, "" );
	init( BLOBSTORE_CONNECT_TRIES, 10 );
	init( BLOBSTORE_CONNECT_TIMEOUT, 10 );

@@ -221,6 +221,7 @@ public:
	int HTTP_VERBOSE_LEVEL;
	std::string HTTP_REQUEST_ID_HEADER;
	bool HTTP_REQUEST_AWS_V4_HEADER; // setting this knob to true will enable AWS V4 style header.
	bool HTTP_RESPONSE_SKIP_VERIFY_CHECKSUM_FOR_PARTIAL_CONTENT; // skip verify md5 checksum for 206 response
	std::string BLOBSTORE_ENCRYPTION_TYPE;
	int BLOBSTORE_CONNECT_TRIES;
	int BLOBSTORE_CONNECT_TIMEOUT;

@@ -162,7 +162,7 @@ struct CommitTransactionRequest : TimedRequest {
	bool firstInBatch() const { return (flags & FLAG_FIRST_IN_BATCH) != 0; }

	Arena arena;
	SpanID spanContext;
	SpanContext spanContext;
	CommitTransactionRef transaction;
	ReplyPromise<CommitID> reply;
	uint32_t flags;

@@ -172,8 +172,8 @@ struct CommitTransactionRequest : TimedRequest {

	TenantInfo tenantInfo;

	CommitTransactionRequest() : CommitTransactionRequest(SpanID()) {}
	CommitTransactionRequest(SpanID const& context) : spanContext(context), flags(0) {}
	CommitTransactionRequest() : CommitTransactionRequest(SpanContext()) {}
	CommitTransactionRequest(SpanContext const& context) : spanContext(context), flags(0) {}

	template <class Ar>
	void serialize(Ar& ar) {

@@ -242,7 +242,7 @@ struct GetReadVersionRequest : TimedRequest {
		FLAG_PRIORITY_MASK = PRIORITY_SYSTEM_IMMEDIATE,
	};

	SpanID spanContext;
	SpanContext spanContext;
	uint32_t transactionCount;
	uint32_t flags;
	TransactionPriority priority;

@@ -255,7 +255,7 @@ struct GetReadVersionRequest : TimedRequest {
	Version maxVersion; // max version in the client's version vector cache

	GetReadVersionRequest() : transactionCount(1), flags(0), maxVersion(invalidVersion) {}
	GetReadVersionRequest(SpanID spanContext,
	GetReadVersionRequest(SpanContext spanContext,
	                      uint32_t transactionCount,
	                      TransactionPriority priority,
	                      Version maxVersion,

@@ -325,7 +325,7 @@ struct GetKeyServerLocationsReply {
struct GetKeyServerLocationsRequest {
	constexpr static FileIdentifier file_identifier = 9144680;
	Arena arena;
	SpanID spanContext;
	SpanContext spanContext;
	Optional<TenantNameRef> tenant;
	KeyRef begin;
	Optional<KeyRef> end;

@@ -340,7 +340,7 @@ struct GetKeyServerLocationsRequest {
	Version minTenantVersion;

	GetKeyServerLocationsRequest() : limit(0), reverse(false), minTenantVersion(latestVersion) {}
	GetKeyServerLocationsRequest(SpanID spanContext,
	GetKeyServerLocationsRequest(SpanContext spanContext,
	                             Optional<TenantNameRef> const& tenant,
	                             KeyRef const& begin,
	                             Optional<KeyRef> const& end,

@@ -378,12 +378,12 @@ struct GetRawCommittedVersionReply {

struct GetRawCommittedVersionRequest {
	constexpr static FileIdentifier file_identifier = 12954034;
	SpanID spanContext;
	SpanContext spanContext;
	Optional<UID> debugID;
	ReplyPromise<GetRawCommittedVersionReply> reply;
	Version maxVersion; // max version in the grv proxy's version vector cache

	explicit GetRawCommittedVersionRequest(SpanID spanContext,
	explicit GetRawCommittedVersionRequest(SpanContext spanContext,
	                                       Optional<UID> const& debugID = Optional<UID>(),
	                                       Version maxVersion = invalidVersion)
	    : spanContext(spanContext), debugID(debugID), maxVersion(maxVersion) {}

@@ -24,6 +24,7 @@

#include "fdbclient/FDBTypes.h"
#include "fdbclient/Knobs.h"
#include "flow/Tracing.h"

// The versioned message has wire format : -1, version, messages
static const int32_t VERSION_HEADER = -1;

@@ -77,6 +78,7 @@ struct MutationRef {
		AndV2,
		CompareAndClear,
		Reserved_For_SpanContextMessage /* See fdbserver/SpanContextMessage.h */,
		Reserved_For_OTELSpanContextMessage,
		MAX_ATOMIC_OP
	};
	// This is stored this way for serialization purposes.

@@ -190,7 +192,7 @@ struct CommitTransactionRef {
	Version read_snapshot = 0;
	bool report_conflicting_keys = false;
	bool lock_aware = false; // set when metadata mutations are present
	Optional<SpanID> spanContext;
	Optional<SpanContext> spanContext;

	template <class Ar>
	force_inline void serialize(Ar& ar) {

@@ -66,7 +66,6 @@ public:
	ClusterConnectionString(const std::vector<NetworkAddress>& coordinators, Key key);
	ClusterConnectionString(const std::vector<Hostname>& hosts, Key key);

	std::vector<NetworkAddress> const& coordinators() const { return coords; }
	Key clusterKey() const { return key; }
	Key clusterKeyName() const {
		return keyDesc;

@@ -251,9 +250,9 @@ struct OpenDatabaseCoordRequest {
		           traceLogGroup,
		           knownClientInfoID,
		           clusterKey,
		           hostnames,
		           coordinators,
		           reply);
		           reply,
		           hostnames);
	}
};

@@ -141,7 +141,7 @@ struct WatchParameters : public ReferenceCounted<WatchParameters> {

	const Version version;
	const TagSet tags;
	const SpanID spanID;
	const SpanContext spanContext;
	const TaskPriority taskID;
	const Optional<UID> debugID;
	const UseProvisionalProxies useProvisionalProxies;

@@ -151,11 +151,11 @@ struct WatchParameters : public ReferenceCounted<WatchParameters> {
	                Optional<Value> value,
	                Version version,
	                TagSet tags,
	                SpanID spanID,
	                SpanContext spanContext,
	                TaskPriority taskID,
	                Optional<UID> debugID,
	                UseProvisionalProxies useProvisionalProxies)
	  : tenant(tenant), key(key), value(value), version(version), tags(tags), spanID(spanID), taskID(taskID),
	  : tenant(tenant), key(key), value(value), version(version), tags(tags), spanContext(spanContext), taskID(taskID),
	    debugID(debugID), useProvisionalProxies(useProvisionalProxies) {}
};

@@ -222,6 +222,8 @@ struct KeyRangeLocationInfo {
	  : tenantEntry(tenantEntry), range(range), locations(locations) {}
};

class GlobalConfig;

class DatabaseContext : public ReferenceCounted<DatabaseContext>, public FastAllocated<DatabaseContext>, NonCopyable {
public:
	static DatabaseContext* allocateOnForeignThread() {

@@ -416,12 +418,12 @@ public:
	Optional<TenantName> defaultTenant;

	struct VersionRequest {
		SpanID spanContext;
		SpanContext spanContext;
		Promise<GetReadVersionReply> reply;
		TagSet tags;
		Optional<UID> debugID;

		VersionRequest(SpanID spanContext, TagSet tags = TagSet(), Optional<UID> debugID = Optional<UID>())
		VersionRequest(SpanContext spanContext, TagSet tags = TagSet(), Optional<UID> debugID = Optional<UID>())
		  : spanContext(spanContext), tags(tags), debugID(debugID) {}
	};

@@ -524,7 +526,6 @@ public:
	Counter transactionsExpensiveClearCostEstCount;
	Counter transactionGrvFullBatches;
	Counter transactionGrvTimedOutBatches;
	Counter transactionsStaleVersionVectors;

	ContinuousSample<double> latencies, readLatencies, commitLatencies, GRVLatencies, mutationsPerCommit,
	    bytesPerCommit, bgLatencies, bgGranulesPerRequest;

@@ -628,6 +629,7 @@ public:
	using TransactionT = ReadYourWritesTransaction;
	Reference<TransactionT> createTransaction();

	std::unique_ptr<GlobalConfig> globalConfig;
	EventCacheHolder connectToDatabaseEventCacheHolder;

private:

@@ -29,29 +29,10 @@
#include <unordered_set>
#include <boost/functional/hash.hpp>

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ProtocolVersion.h"
#include "flow/flow.h"

enum class TraceFlags : uint8_t { unsampled = 0b00000000, sampled = 0b00000001 };

inline TraceFlags operator&(TraceFlags lhs, TraceFlags rhs) {
	return static_cast<TraceFlags>(static_cast<std::underlying_type_t<TraceFlags>>(lhs) &
	                               static_cast<std::underlying_type_t<TraceFlags>>(rhs));
}

struct SpanContext {
	UID traceID;
	uint64_t spanID;
	TraceFlags m_Flags;
	SpanContext() : traceID(UID()), spanID(0), m_Flags(TraceFlags::unsampled) {}
	SpanContext(UID traceID, uint64_t spanID, TraceFlags flags) : traceID(traceID), spanID(spanID), m_Flags(flags) {}
	SpanContext(UID traceID, uint64_t spanID) : traceID(traceID), spanID(spanID), m_Flags(TraceFlags::unsampled) {}
	SpanContext(Arena arena, const SpanContext& span)
	  : traceID(span.traceID), spanID(span.spanID), m_Flags(span.m_Flags) {}
	bool isSampled() const { return (m_Flags & TraceFlags::sampled) == TraceFlags::sampled; }
};
#include "fdbclient/Status.h"

typedef int64_t Version;
typedef uint64_t LogEpoch;

@@ -167,6 +148,11 @@ static const Tag invalidTag{ tagLocalitySpecial, 0 };
static const Tag txsTag{ tagLocalitySpecial, 1 };
static const Tag cacheTag{ tagLocalitySpecial, 2 };

const int MATCH_INDEX_ALL = 0;
const int MATCH_INDEX_NONE = 1;
const int MATCH_INDEX_MATCHED_ONLY = 2;
const int MATCH_INDEX_UNMATCHED_ONLY = 3;

enum { txsTagOld = -1, invalidTagOld = -100 };

struct TagsAndMessage {
@@ -778,9 +764,18 @@ struct MappedKeyValueRef : KeyValueRef {

	MappedReqAndResultRef reqAndResult;

	// Boundary KVs are always returned so that the caller can use them as a continuation.
	// For a non-boundary KV, this is always false.
	// For a boundary KV, it is true only when the secondary query succeeds (returns non-empty).
	// Note: only the MATCH_INDEX_MATCHED_ONLY and MATCH_INDEX_UNMATCHED_ONLY modes can make use of it,
	// to decide whether the boundary is a match/unmatch.
	// In the case of MATCH_INDEX_ALL and MATCH_INDEX_NONE, the caller should not care whether the boundary has a match.
	bool boundaryAndExist;

	MappedKeyValueRef() = default;
	MappedKeyValueRef(Arena& a, const MappedKeyValueRef& copyFrom) : KeyValueRef(a, copyFrom) {
		const auto& reqAndResultCopyFrom = copyFrom.reqAndResult;
		boundaryAndExist = copyFrom.boundaryAndExist;
		if (std::holds_alternative<GetValueReqAndResultRef>(reqAndResultCopyFrom)) {
			auto getValue = std::get<GetValueReqAndResultRef>(reqAndResultCopyFrom);
			reqAndResult = GetValueReqAndResultRef(a, getValue);

@@ -794,7 +789,7 @@ struct MappedKeyValueRef : KeyValueRef {

	bool operator==(const MappedKeyValueRef& rhs) const {
		return static_cast<const KeyValueRef&>(*this) == static_cast<const KeyValueRef&>(rhs) &&
		       reqAndResult == rhs.reqAndResult;
		       reqAndResult == rhs.reqAndResult && boundaryAndExist == rhs.boundaryAndExist;
	}
	bool operator!=(const MappedKeyValueRef& rhs) const { return !(rhs == *this); }

@@ -804,7 +799,7 @@ struct MappedKeyValueRef : KeyValueRef {

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, ((KeyValueRef&)*this), reqAndResult);
		serializer(ar, ((KeyValueRef&)*this), reqAndResult, boundaryAndExist);
	}
};
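boundaryAndExist is only meaningful on the first and last rows of a mapped-range reply, and only in the MATCH_INDEX_MATCHED_ONLY and MATCH_INDEX_UNMATCHED_ONLY modes. A sketch of how a reader might consume it, assuming the fields above (the result container and helper function are hypothetical):

// Hypothetical consumer: boundary rows always come back for continuation,
// but only count as data when the secondary lookup actually matched.
for (int i = 0; i < result.size(); i++) {
	const MappedKeyValueRef& kv = result[i];
	bool boundary = (i == 0 || i == result.size() - 1);
	if (!boundary || kv.boundaryAndExist) {
		handleMatchedRow(kv); // hypothetical helper
	}
}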
@@ -869,8 +864,8 @@ struct KeyValueStoreType {
		serializer(ar, type);
	}

	std::string toString() const {
		switch (type) {
	static std::string getStoreTypeStr(const StoreType& storeType) {
		switch (storeType) {
		case SSD_BTREE_V1:
			return "ssd-1";
		case SSD_BTREE_V2:

@@ -889,6 +884,7 @@ struct KeyValueStoreType {
			return "unknown";
		}
	}
	std::string toString() const { return getStoreTypeStr((StoreType)type); }

private:
	uint32_t type;
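Factoring toString() into a static getStoreTypeStr() lets callers format a StoreType value without constructing a KeyValueStoreType instance. A one-line usage sketch, assuming the enum values visible in the switch above:

std::string name = KeyValueStoreType::getStoreTypeStr(KeyValueStoreType::SSD_BTREE_V2); // "ssd-2"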
@@ -1183,10 +1179,10 @@ class Database;

struct HealthMetrics {
	struct StorageStats {
		int64_t storageQueue;
		int64_t storageDurabilityLag;
		double diskUsage;
		double cpuUsage;
		int64_t storageQueue = 0;
		int64_t storageDurabilityLag = 0;
		double diskUsage = 0.0;
		double cpuUsage = 0.0;

		bool operator==(StorageStats const& r) const {
			return ((storageQueue == r.storageQueue) && (storageDurabilityLag == r.storageDurabilityLag) &&

@@ -1441,16 +1437,44 @@ struct StorageMetadataType {
	constexpr static FileIdentifier file_identifier = 732123;
	// when the SS is initialized, in epoch seconds, comes from currentTime()
	double createdTime;
	KeyValueStoreType storeType;

	// no need to serialize part (should be assigned after initialization)
	bool wrongConfigured = false;

	StorageMetadataType() : createdTime(0) {}
	StorageMetadataType(uint64_t t) : createdTime(t) {}
	StorageMetadataType(uint64_t t, KeyValueStoreType storeType = KeyValueStoreType::END, bool wrongConfigured = false)
	  : createdTime(t), storeType(storeType), wrongConfigured(wrongConfigured) {}

	static double currentTime() { return g_network->timer(); }

	bool operator==(const StorageMetadataType& b) const {
		return createdTime == b.createdTime && storeType == b.storeType && wrongConfigured == b.wrongConfigured;
	}

	bool operator<(const StorageMetadataType& b) const {
		if (wrongConfigured == b.wrongConfigured) {
			// the older SS has smaller createdTime
			return createdTime < b.createdTime;
		}
		return wrongConfigured > b.wrongConfigured;
	}

	bool operator>(const StorageMetadataType& b) const { return b < *this; }

	// To change this serialization, ProtocolVersion::StorageMetadata must be updated, and downgrades need
	// to be considered
	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, createdTime);
		serializer(ar, createdTime, storeType);
	}

	StatusObject toJSON() const {
		StatusObject result;
		result["created_time_timestamp"] = createdTime;
		result["created_time_datetime"] = epochsToGMTString(createdTime);
		result["storage_engine"] = storeType.toString();
		return result;
	}
};

@@ -37,12 +37,33 @@ const KeyRef transactionTagSampleCost = LiteralStringRef("config/transaction_tag
const KeyRef samplingFrequency = LiteralStringRef("visibility/sampling/frequency");
const KeyRef samplingWindow = LiteralStringRef("visibility/sampling/window");

GlobalConfig::GlobalConfig(Database& cx) : cx(cx), lastUpdate(0) {}
GlobalConfig::GlobalConfig(const Database& cx) : cx(cx), lastUpdate(0) {}

GlobalConfig& GlobalConfig::globalConfig() {
	void* res = g_network->global(INetwork::enGlobalConfig);
	ASSERT(res);
	return *reinterpret_cast<GlobalConfig*>(res);
void GlobalConfig::applyChanges(Transaction& tr,
                                const VectorRef<KeyValueRef>& insertions,
                                const VectorRef<KeyRangeRef>& clears) {
	VersionHistory vh{ 0 };
	for (const auto& kv : insertions) {
		vh.mutations.emplace_back_deep(vh.mutations.arena(), MutationRef(MutationRef::SetValue, kv.key, kv.value));
		tr.set(kv.key.withPrefix(globalConfigKeysPrefix), kv.value);
	}
	for (const auto& range : clears) {
		vh.mutations.emplace_back_deep(vh.mutations.arena(),
		                               MutationRef(MutationRef::ClearRange, range.begin, range.end));
		tr.clear(
		    KeyRangeRef(range.begin.withPrefix(globalConfigKeysPrefix), range.end.withPrefix(globalConfigKeysPrefix)));
	}

	// Record the mutations in this commit into the global configuration history.
	Key historyKey = addVersionStampAtEnd(globalConfigHistoryPrefix);
	ObjectWriter historyWriter(IncludeVersion());
	historyWriter.serialize(vh);
	tr.atomicOp(historyKey, historyWriter.toStringRef(), MutationRef::SetVersionstampedKey);

	// Write version key to trigger update in cluster controller.
	tr.atomicOp(globalConfigVersionKey,
	            LiteralStringRef("0123456789\x00\x00\x00\x00"), // versionstamp
	            MutationRef::SetVersionstampedValue);
}

Key GlobalConfig::prefixedKey(KeyRef key) {
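GlobalConfig::applyChanges is now a static helper: it stages each insertion and clear into the caller's transaction under the global-config prefix, and appends the mutation batch to the versionstamped history so other clients can replay it. A minimal caller sketch, assuming the Transaction and VectorRef types used above (the key and value contents are made up, and the caller still has to commit):

// Hypothetical usage: stage one insertion and no clears, then commit.
Standalone<VectorRef<KeyValueRef>> insertions;
insertions.push_back_deep(insertions.arena(),
                          KeyValueRef(LiteralStringRef("config/some_setting"), // hypothetical key
                                      LiteralStringRef("some_value")));        // hypothetical value
Standalone<VectorRef<KeyRangeRef>> clears; // nothing cleared in this example

Transaction tr(cx);
GlobalConfig::applyChanges(tr, insertions, clears);
wait(tr.commit()); // the changes only persist once the caller commits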
|
@ -33,6 +33,7 @@
|
|||
#include <unordered_map>
|
||||
|
||||
#include "fdbclient/CommitProxyInterface.h"
|
||||
#include "fdbclient/DatabaseContext.h"
|
||||
#include "fdbclient/GlobalConfig.h"
|
||||
#include "fdbclient/ReadYourWrites.h"
|
||||
|
||||
|
@ -66,34 +67,32 @@ struct ConfigValue : ReferenceCounted<ConfigValue> {
|
|||
|
||||
class GlobalConfig : NonCopyable {
|
||||
public:
|
||||
// Creates a GlobalConfig singleton, accessed by calling
|
||||
// GlobalConfig::globalConfig(). This function requires a database object
|
||||
// to allow global configuration to run transactions on the database, and
|
||||
// an AsyncVar object to watch for changes on. The ClientDBInfo pointer
|
||||
// Requires a database object to allow global configuration to run
|
||||
// transactions on the database.
|
||||
explicit GlobalConfig(const Database& cx);
|
||||
|
||||
// Requires an AsyncVar object to watch for changes on. The ClientDBInfo pointer
|
||||
// should point to a ClientDBInfo object which will contain the updated
|
||||
// global configuration history when the given AsyncVar changes. This
|
||||
// function should be called whenever the database object changes, in order
|
||||
// to allow global configuration to run transactions on the latest
|
||||
// database.
|
||||
template <class T>
|
||||
static void create(Database& cx, Reference<AsyncVar<T> const> db, const ClientDBInfo* dbInfo) {
|
||||
if (g_network->global(INetwork::enGlobalConfig) == nullptr) {
|
||||
auto config = new GlobalConfig{ cx };
|
||||
g_network->setGlobal(INetwork::enGlobalConfig, config);
|
||||
config->_updater = updater(config, dbInfo);
|
||||
// Bind changes in `db` to the `dbInfoChanged` AsyncTrigger.
|
||||
// TODO: Change AsyncTrigger to a Reference
|
||||
forward(db, std::addressof(config->dbInfoChanged));
|
||||
} else {
|
||||
GlobalConfig* config = reinterpret_cast<GlobalConfig*>(g_network->global(INetwork::enGlobalConfig));
|
||||
config->cx = cx;
|
||||
}
|
||||
void init(Reference<AsyncVar<T> const> db, const ClientDBInfo* dbInfo) {
|
||||
_updater = updater(this, dbInfo);
|
||||
// Bind changes in `db` to the `dbInfoChanged` AsyncTrigger.
|
||||
// TODO: Change AsyncTrigger to a Reference
|
||||
_forward = forward(db, std::addressof(dbInfoChanged));
|
||||
}
|
||||
|
||||
// Returns a reference to the global GlobalConfig object. Clients should
|
||||
// call this function whenever they need to read a value out of the global
|
||||
// configuration.
|
||||
static GlobalConfig& globalConfig();
|
||||
// Given a list of insertions and clears, applies the necessary changes to
|
||||
// the given transaction to update the global configuration database. Keys
|
||||
// in the list of mutations should not include the global configuration
|
||||
// prefix (`\xff\xff/global_config/`). The caller must still commit the
|
||||
// given transaction in order to persist the changes.
|
||||
static void applyChanges(Transaction& tr,
|
||||
const VectorRef<KeyValueRef>& insertions,
|
||||
const VectorRef<KeyRangeRef>& clears);
|
||||
|
||||
// Use this function to turn a global configuration key defined above into
|
||||
// the full path needed to set the value in the database.
|
||||
|
@ -150,8 +149,6 @@ public:
|
|||
void trigger(KeyRef key, std::function<void(std::optional<std::any>)> fn);
|
||||
|
||||
private:
|
||||
GlobalConfig(Database& cx);
|
||||
|
||||
// The functions below only affect the local copy of the global
|
||||
// configuration keyspace! To insert or remove values across all nodes you
|
||||
// must use a transaction (see the note above).
|
||||
|
@ -173,6 +170,7 @@ private:
|
|||
|
||||
Database cx;
|
||||
AsyncTrigger dbInfoChanged;
|
||||
Future<Void> _forward;
|
||||
Future<Void> _updater;
|
||||
Promise<Void> initialized;
|
||||
AsyncTrigger configChanged;
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/Tenant.h"
|
||||
|
||||
#include "flow/Tracing.h"
|
||||
#include "flow/ThreadHelper.actor.h"
|
||||
|
||||
struct VersionVector;
|
||||
|
@ -67,6 +68,7 @@ public:
|
|||
const KeySelectorRef& end,
|
||||
const StringRef& mapper,
|
||||
GetRangeLimits limits,
|
||||
int matchIndex = MATCH_INDEX_ALL,
|
||||
bool snapshot = false,
|
||||
bool reverse = false) = 0;
|
||||
virtual ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) = 0;
|
||||
|
@ -96,11 +98,11 @@ public:
|
|||
|
||||
virtual ThreadFuture<Void> commit() = 0;
|
||||
virtual Version getCommittedVersion() = 0;
|
||||
// @todo This API and the "getSpanID()" API may help with debugging simulation
|
||||
// @todo This API and the "getSpanContext()" API may help with debugging simulation
|
||||
// test failures. (These APIs are not currently invoked anywhere.) Remove them
|
||||
// later if they are not really needed.
|
||||
virtual VersionVector getVersionVector() = 0;
|
||||
virtual UID getSpanID() = 0;
|
||||
virtual SpanContext getSpanContext() = 0;
|
||||
virtual ThreadFuture<int64_t> getApproximateSize() = 0;
|
||||
|
||||
virtual void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) = 0;
|
||||
|
@ -151,7 +153,11 @@ public:
|
|||
virtual void addref() = 0;
|
||||
virtual void delref() = 0;
|
||||
|
||||
// Management API, attempt to kill or suspend a process, return 1 for request sent out, 0 for failure
|
||||
// Management API, attempt to kill or suspend a process, return 1 for request being sent out, 0 for failure
|
||||
// The address string can be extended to a comma-delimited string like <addr1>,<addr2>...,<addrN> to send reboot
|
||||
// requests to multiple processes simultaneously
|
||||
// If multiple addresses are provided, it returns 1 for requests being sent out to all provided addresses.
|
||||
// On the contrary, if the client cannot connect to any of the given address, no requests will be sent out
|
||||
virtual ThreadFuture<int64_t> rebootWorker(const StringRef& address, bool check, int duration) = 0;
|
||||
// Management API, force the database to recover into DCID, causing the database to lose the most recently committed
|
||||
// mutations
|
||||
|
|
|
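The expanded rebootWorker comment documents the comma-delimited address form. A small caller sketch under those semantics (the addresses are placeholders, and db stands for an implementation of the IDatabase interface in this header):

// Hypothetical: request a reboot of two processes in one call. The result is 1
// only if requests were sent out to all listed addresses, 0 otherwise.
StringRef addresses = LiteralStringRef("10.0.0.1:4500,10.0.0.2:4500");
ThreadFuture<int64_t> sent = db->rebootWorker(addresses, /*check=*/false, /*duration=*/0);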
@ -45,7 +45,7 @@ public:
|
|||
// Not implemented:
|
||||
void setVersion(Version) override { throw client_invalid_operation(); }
|
||||
VersionVector getVersionVector() const override { throw client_invalid_operation(); }
|
||||
UID getSpanID() const override { throw client_invalid_operation(); }
|
||||
SpanContext getSpanContext() const override { throw client_invalid_operation(); }
|
||||
Future<Key> getKey(KeySelector const& key, Snapshot snapshot = Snapshot::False) override {
|
||||
throw client_invalid_operation();
|
||||
}
|
||||
|
|
|
@ -108,6 +108,11 @@ void IKnobCollection::setupKnobs(const std::vector<std::pair<std::string, std::s
|
|||
TraceEvent(SevWarnAlways, "InvalidKnobValue")
|
||||
.detail("Knob", printable(knobName))
|
||||
.detail("Value", printable(knobValueString));
|
||||
} else if (e.code() == error_code_invalid_option) {
|
||||
std::cerr << "WARNING: Invalid knob option '" << knobName << "'\n";
|
||||
TraceEvent(SevWarnAlways, "InvalidKnobName")
|
||||
.detail("Knob", printable(knobName))
|
||||
.detail("Value", printable(knobValueString));
|
||||
} else {
|
||||
std::cerr << "ERROR: Failed to set knob option '" << knobName << "': " << e.what() << "\n";
|
||||
TraceEvent(SevError, "FailedToSetKnob")
|
||||
|
|
|
@ -74,6 +74,7 @@ public:
|
|||
KeySelector end,
|
||||
Key mapper,
|
||||
GetRangeLimits limits,
|
||||
int matchIndex = MATCH_INDEX_ALL,
|
||||
Snapshot = Snapshot::False,
|
||||
Reverse = Reverse::False) = 0;
|
||||
virtual Future<Standalone<VectorRef<const char*>>> getAddressesForKey(Key const& key) = 0;
|
||||
|
@ -95,7 +96,7 @@ public:
|
|||
virtual Future<Void> commit() = 0;
|
||||
virtual Version getCommittedVersion() const = 0;
|
||||
virtual VersionVector getVersionVector() const = 0;
|
||||
virtual UID getSpanID() const = 0;
|
||||
virtual SpanContext getSpanContext() const = 0;
|
||||
virtual int64_t getApproximateSize() const = 0;
|
||||
virtual Future<Standalone<StringRef>> getVersionstamp() = 0;
|
||||
virtual void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) = 0;
|
||||
|
|
|
@ -799,8 +799,8 @@ ACTOR Future<Optional<ClusterConnectionString>> getConnectionString(Database cx)
|
|||
}
|
||||
|
||||
ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
|
||||
Reference<IQuorumChange> change,
|
||||
std::vector<NetworkAddress> desiredCoordinators) {
|
||||
ClusterConnectionString* conn,
|
||||
std::string newName) {
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
|
||||
|
@ -816,34 +816,30 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
|
|||
tr->getDatabase()->getConnectionRecord()->getConnectionString().clusterKeyName())
|
||||
return CoordinatorsResult::BAD_DATABASE_STATE; // Someone changed the "name" of the database??
|
||||
|
||||
state std::vector<NetworkAddress> oldCoordinators = wait(old.tryResolveHostnames());
|
||||
state CoordinatorsResult result = CoordinatorsResult::SUCCESS;
|
||||
if (!desiredCoordinators.size()) {
|
||||
std::vector<NetworkAddress> _desiredCoordinators = wait(change->getDesiredCoordinators(
|
||||
tr,
|
||||
oldCoordinators,
|
||||
Reference<ClusterConnectionMemoryRecord>(new ClusterConnectionMemoryRecord(old)),
|
||||
result));
|
||||
desiredCoordinators = _desiredCoordinators;
|
||||
if (conn->hostnames.size() + conn->coords.size() == 0) {
|
||||
conn->hostnames = old.hostnames;
|
||||
conn->coords = old.coords;
|
||||
}
|
||||
std::vector<NetworkAddress> desiredCoordinators = wait(conn->tryResolveHostnames());
|
||||
if (desiredCoordinators.size() != conn->hostnames.size() + conn->coords.size()) {
|
||||
TraceEvent("ChangeQuorumCheckerEarlyTermination")
|
||||
.detail("Reason", "One or more hostnames are unresolvable")
|
||||
.backtrace();
|
||||
return CoordinatorsResult::COORDINATOR_UNREACHABLE;
|
||||
}
|
||||
|
||||
if (result != CoordinatorsResult::SUCCESS)
|
||||
return result;
|
||||
|
||||
if (!desiredCoordinators.size())
|
||||
return CoordinatorsResult::INVALID_NETWORK_ADDRESSES;
|
||||
|
||||
std::sort(desiredCoordinators.begin(), desiredCoordinators.end());
|
||||
|
||||
std::string newName = change->getDesiredClusterKeyName();
|
||||
if (newName.empty())
|
||||
if (newName.empty()) {
|
||||
newName = old.clusterKeyName().toString();
|
||||
|
||||
if (oldCoordinators == desiredCoordinators && old.clusterKeyName() == newName)
|
||||
}
|
||||
std::sort(conn->hostnames.begin(), conn->hostnames.end());
|
||||
std::sort(conn->coords.begin(), conn->coords.end());
|
||||
std::sort(old.hostnames.begin(), old.hostnames.end());
|
||||
std::sort(old.coords.begin(), old.coords.end());
|
||||
if (conn->hostnames == old.hostnames && conn->coords == old.coords && old.clusterKeyName() == newName) {
|
||||
return CoordinatorsResult::SAME_NETWORK_ADDRESSES;
|
||||
}
|
||||
|
||||
state ClusterConnectionString conn(desiredCoordinators,
|
||||
StringRef(newName + ':' + deterministicRandom()->randomAlphaNumeric(32)));
|
||||
conn->parseKey(newName + ':' + deterministicRandom()->randomAlphaNumeric(32));
|
||||
|
||||
if (g_network->isSimulated()) {
|
||||
int i = 0;
|
||||
|
@ -868,19 +864,27 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
|
|||
}
|
||||
|
||||
std::vector<Future<Optional<LeaderInfo>>> leaderServers;
|
||||
ClientCoordinators coord(Reference<ClusterConnectionMemoryRecord>(new ClusterConnectionMemoryRecord(conn)));
|
||||
ClientCoordinators coord(Reference<ClusterConnectionMemoryRecord>(new ClusterConnectionMemoryRecord(*conn)));
|
||||
|
||||
leaderServers.reserve(coord.clientLeaderServers.size());
|
||||
for (int i = 0; i < coord.clientLeaderServers.size(); i++)
|
||||
leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader,
|
||||
GetLeaderRequest(coord.clusterKey, UID()),
|
||||
TaskPriority::CoordinationReply));
|
||||
for (int i = 0; i < coord.clientLeaderServers.size(); i++) {
|
||||
if (coord.clientLeaderServers[i].hostname.present()) {
|
||||
leaderServers.push_back(retryGetReplyFromHostname(GetLeaderRequest(coord.clusterKey, UID()),
|
||||
coord.clientLeaderServers[i].hostname.get(),
|
||||
WLTOKEN_CLIENTLEADERREG_GETLEADER,
|
||||
TaskPriority::CoordinationReply));
|
||||
} else {
|
||||
leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader,
|
||||
GetLeaderRequest(coord.clusterKey, UID()),
|
||||
TaskPriority::CoordinationReply));
|
||||
}
|
||||
}
|
||||
|
||||
choose {
|
||||
when(wait(waitForAll(leaderServers))) {}
|
||||
when(wait(delay(5.0))) { return CoordinatorsResult::COORDINATOR_UNREACHABLE; }
|
||||
}
|
||||
tr->set(coordinatorsKey, conn.toString());
|
||||
tr->set(coordinatorsKey, conn->toString());
|
||||
return Optional<CoordinatorsResult>();
|
||||
}
|
||||
|
||||
|
@ -990,32 +994,6 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
|
|||
}
|
||||
}
|
||||
|
||||
struct SpecifiedQuorumChange final : IQuorumChange {
|
||||
std::vector<NetworkAddress> desired;
|
||||
explicit SpecifiedQuorumChange(std::vector<NetworkAddress> const& desired) : desired(desired) {}
|
||||
Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
|
||||
std::vector<NetworkAddress> oldCoordinators,
|
||||
Reference<IClusterConnectionRecord>,
|
||||
CoordinatorsResult&) override {
|
||||
return desired;
|
||||
}
|
||||
};
|
||||
Reference<IQuorumChange> specifiedQuorumChange(std::vector<NetworkAddress> const& addresses) {
|
||||
return Reference<IQuorumChange>(new SpecifiedQuorumChange(addresses));
|
||||
}
|
||||
|
||||
struct NoQuorumChange final : IQuorumChange {
|
||||
Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
|
||||
std::vector<NetworkAddress> oldCoordinators,
|
||||
Reference<IClusterConnectionRecord>,
|
||||
CoordinatorsResult&) override {
|
||||
return oldCoordinators;
|
||||
}
|
||||
};
|
||||
Reference<IQuorumChange> noQuorumChange() {
|
||||
return Reference<IQuorumChange>(new NoQuorumChange);
|
||||
}
|
||||
|
||||
struct NameQuorumChange final : IQuorumChange {
|
||||
std::string newName;
|
||||
Reference<IQuorumChange> otherChange;
|
||||
|
@@ -1062,12 +1040,30 @@ struct AutoQuorumChange final : IQuorumChange {
                                Reference<IClusterConnectionRecord> ccr,
                                int desiredCount,
                                std::set<AddressExclusion>* excluded) {
        ClusterConnectionString cs = ccr->getConnectionString();
        if (oldCoordinators.size() != cs.hostnames.size() + cs.coords.size()) {
            return false;
        }

        // Are there enough coordinators for the redundancy level?
        if (oldCoordinators.size() < desiredCount)
            return false;
        if (oldCoordinators.size() % 2 != 1)
            return false;

        // Check exclusions
        for (auto& c : oldCoordinators) {
            if (addressExcluded(*excluded, c))
                return false;
        }

        // Check locality
        // FIXME: Actual locality!
        std::sort(oldCoordinators.begin(), oldCoordinators.end());
        for (int i = 1; i < oldCoordinators.size(); i++)
            if (oldCoordinators[i - 1].ip == oldCoordinators[i].ip)
                return false; // Multiple coordinators share an IP

        // Check availability
        ClientCoordinators coord(ccr);
        std::vector<Future<Optional<LeaderInfo>>> leaderServers;
@@ -1095,19 +1091,6 @@ struct AutoQuorumChange final : IQuorumChange {
            }
        }

        // Check exclusions
        for (auto& c : oldCoordinators) {
            if (addressExcluded(*excluded, c))
                return false;
        }

        // Check locality
        // FIXME: Actual locality!
        std::sort(oldCoordinators.begin(), oldCoordinators.end());
        for (int i = 1; i < oldCoordinators.size(); i++)
            if (oldCoordinators[i - 1].ip == oldCoordinators[i].ip)
                return false; // Multiple coordinators share an IP

        return true; // The status quo seems fine
    }
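The two hunks above move the exclusion and locality checks earlier in `isAcceptable` and delete the now-duplicated later copies. The locality test itself is a sort-then-adjacent-scan for coordinators sharing an IP; the same idiom in self-contained form (plain strings stand in for `NetworkAddress`):

#include <algorithm>
#include <string>
#include <vector>

// True when every coordinator IP is distinct. Sorting guarantees that any
// duplicates become adjacent, so a single linear scan finds them.
bool allIpsDistinct(std::vector<std::string> ips) {
    std::sort(ips.begin(), ips.end());
    return std::adjacent_find(ips.begin(), ips.end()) == ips.end();
}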
@@ -1149,8 +1132,10 @@ struct AutoQuorumChange final : IQuorumChange {

        if (checkAcceptable) {
            bool ok = wait(isAcceptable(self.getPtr(), tr, oldCoordinators, ccr, desiredCount, &excluded));
            if (ok)
            if (ok) {
                *err = CoordinatorsResult::SAME_NETWORK_ADDRESSES;
                return oldCoordinators;
            }
        }

        std::vector<NetworkAddress> chosen;

@@ -1796,25 +1781,6 @@ ACTOR Future<bool> setHealthyZone(Database cx, StringRef zoneId, double seconds,
    }
}

ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Database cx, bool ignoreRebalance) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::LOCK_AWARE);
            tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            if (ignoreRebalance) {
                tr.set(rebalanceDDIgnoreKey, LiteralStringRef("on"));
            } else {
                tr.clear(rebalanceDDIgnoreKey);
            }
            wait(tr.commit());
            return Void();
        } catch (Error& e) {
            wait(tr.onError(e));
        }
    }
}

ACTOR Future<int> setDDMode(Database cx, int mode) {
    state Transaction tr(cx);
    state int oldMode = -1;
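The removed `setDDIgnoreRebalanceSwitch` (its declaration also disappears from the header below) followed the standard FoundationDB transaction retry idiom: mutate inside `loop`/`try`, `commit()`, and funnel every error through `onError`, which backs off and retries only retryable errors. A generic, non-authoritative sketch of that shape, with a placeholder transaction type rather than the real `Transaction` class:

#include <exception>

// Sketch of the retry loop: run the body, commit, and let onError() decide
// whether to back off and retry or to rethrow. Transaction is any type with
// commit()/onError(); this is not the fdbclient class.
template <class Transaction, class Body>
void runTransaction(Transaction& tr, Body body) {
    for (;;) {
        try {
            body(tr);    // e.g. tr.set(rebalanceDDIgnoreKey, "on") or tr.clear(...)
            tr.commit(); // durable once this returns
            return;
        } catch (const std::exception& e) {
            tr.onError(e); // swallows retryable errors after a backoff, rethrows otherwise
        }
    }
}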
@@ -55,12 +55,10 @@ struct IQuorumChange : ReferenceCounted<IQuorumChange> {

// Change to use the given set of coordination servers
ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
                                                               Reference<IQuorumChange> change,
                                                               std::vector<NetworkAddress> desiredCoordinators);
                                                               ClusterConnectionString* conn,
                                                               std::string newName);
ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChange> change);
Reference<IQuorumChange> autoQuorumChange(int desired = -1);
Reference<IQuorumChange> noQuorumChange();
Reference<IQuorumChange> specifiedQuorumChange(std::vector<NetworkAddress> const&);
Reference<IQuorumChange> nameQuorumChange(std::string const& name, Reference<IQuorumChange> const& other);

// Exclude the given set of servers from use as state servers. Returns as soon as the change is durable, without

@@ -139,7 +137,6 @@ ACTOR Future<int> setDDMode(Database cx, int mode);
ACTOR Future<Void> forceRecovery(Reference<IClusterConnectionRecord> clusterFile, Standalone<StringRef> dcId);

ACTOR Future<Void> printHealthyZone(Database cx);
ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Database cx, bool ignoreRebalance);
ACTOR Future<bool> clearHealthyZone(Database cx, bool printWarning = false, bool clearSSFailureZoneString = false);
ACTOR Future<bool> setHealthyZone(Database cx, StringRef zoneId, double seconds, bool printWarning = false);
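The declarations above show how the quorum-change strategies are meant to be consumed: each factory returns a `Reference<IQuorumChange>` that `changeQuorum` drives. A hypothetical call site, assuming the fdbclient headers and a connected `Database cx` plus a `std::vector<NetworkAddress> addresses` (illustrative usage, not code from this diff):

// Pin the coordinators to an explicit list:
Future<CoordinatorsResult> f1 = changeQuorum(cx, specifiedQuorumChange(addresses));

// Or let the cluster re-pick an acceptable set of (here) five coordinators,
// keeping the current set if isAcceptable() approves it:
Future<CoordinatorsResult> f2 = changeQuorum(cx, autoQuorumChange(5));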
@@ -250,7 +250,7 @@ TEST_CASE("/fdbclient/MonitorLeader/ConnectionString/hostname") {

    ClusterConnectionString cs(hostnames, LiteralStringRef("TestCluster:0"));
    ASSERT(cs.hostnames.size() == 2);
    ASSERT(cs.coordinators().size() == 0);
    ASSERT(cs.coords.size() == 0);
    ASSERT(cs.toString() == connectionString);
}

@@ -270,6 +270,9 @@ TEST_CASE("/fdbclient/MonitorLeader/ConnectionString/hostname") {

ACTOR Future<std::vector<NetworkAddress>> tryResolveHostnamesImpl(ClusterConnectionString* self) {
    state std::set<NetworkAddress> allCoordinatorsSet;
    for (const auto& coord : self->coords) {
        allCoordinatorsSet.insert(coord);
    }
    std::vector<Future<Void>> fs;
    for (auto& hostname : self->hostnames) {
        fs.push_back(map(hostname.resolve(), [&](Optional<NetworkAddress> const& addr) -> Void {

@@ -280,9 +283,6 @@ ACTOR Future<std::vector<NetworkAddress>> tryResolveHostnamesImpl(ClusterConnect
        }));
    }
    wait(waitForAll(fs));
    for (const auto& coord : self->coords) {
        allCoordinatorsSet.insert(coord);
    }
    std::vector<NetworkAddress> allCoordinators(allCoordinatorsSet.begin(), allCoordinatorsSet.end());
    std::sort(allCoordinators.begin(), allCoordinators.end());
    return allCoordinators;
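These two hunks relocate the insertion of the static coordinators from after the hostname-resolution wait to before it, deleting the post-wait copy. Incidentally, since `std::set` already iterates in key order, the vector built from it is sorted on construction, so the trailing `std::sort` is redundant (though harmless) as long as `NetworkAddress`'s `operator<` defines the intended order. A self-contained illustration of that property:

#include <algorithm>
#include <cassert>
#include <set>
#include <vector>

int main() {
    std::set<int> s{ 3, 1, 2 };
    std::vector<int> v(s.begin(), s.end()); // {1, 2, 3}: set iteration is ordered
    assert(std::is_sorted(v.begin(), v.end()));
}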
@@ -300,8 +300,8 @@ TEST_CASE("/fdbclient/MonitorLeader/PartialResolve") {

    INetworkConnections::net()->addMockTCPEndpoint(hn, port, { address });

    state ClusterConnectionString cs(connectionString);
    state std::vector<NetworkAddress> allCoordinators = wait(cs.tryResolveHostnames());
    ClusterConnectionString cs(connectionString);
    std::vector<NetworkAddress> allCoordinators = wait(cs.tryResolveHostnames());
    ASSERT(allCoordinators.size() == 1 &&
           std::find(allCoordinators.begin(), allCoordinators.end(), address) != allCoordinators.end());

@@ -460,7 +460,7 @@ ClientCoordinators::ClientCoordinators(Reference<IClusterConnectionRecord> ccr)
    for (auto h : cs.hostnames) {
        clientLeaderServers.push_back(ClientLeaderRegInterface(h));
    }
    for (auto s : cs.coordinators()) {
    for (auto s : cs.coords) {
        clientLeaderServers.push_back(ClientLeaderRegInterface(s));
    }
}

@@ -866,7 +866,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
    Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions,
    Key traceLogGroup) {
    state ClusterConnectionString cs = info.intermediateConnRecord->getConnectionString();
    state int coordinatorsSize = cs.hostnames.size() + cs.coordinators().size();
    state int coordinatorsSize = cs.hostnames.size() + cs.coords.size();
    state int index = 0;
    state int successIndex = 0;
    state Optional<double> incorrectTime;

@@ -880,7 +880,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
    for (const auto& h : cs.hostnames) {
        clientLeaderServers.push_back(ClientLeaderRegInterface(h));
    }
    for (const auto& c : cs.coordinators()) {
    for (const auto& c : cs.coords) {
        clientLeaderServers.push_back(ClientLeaderRegInterface(c));
    }

@@ -890,11 +890,9 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
        state ClientLeaderRegInterface clientLeaderServer = clientLeaderServers[index];
        state OpenDatabaseCoordRequest req;

        coordinator->set(clientLeaderServer);

        req.clusterKey = cs.clusterKey();
        req.hostnames = cs.hostnames;
        req.coordinators = cs.coordinators();
        req.coordinators = cs.coords;
        req.knownClientInfoID = clientInfo->get().id;
        req.supportedVersions = supportedVersions->get();
        req.traceLogGroup = traceLogGroup;

@@ -922,16 +920,26 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
            incorrectTime = Optional<double>();
        }

        state ErrorOr<CachedSerialization<ClientDBInfo>> rep;
        state Future<ErrorOr<CachedSerialization<ClientDBInfo>>> repFuture;
        if (clientLeaderServer.hostname.present()) {
            wait(store(rep,
                       tryGetReplyFromHostname(req,
                                               clientLeaderServer.hostname.get(),
                                               WLTOKEN_CLIENTLEADERREG_OPENDATABASE,
                                               TaskPriority::CoordinationReply)));
            repFuture = tryGetReplyFromHostname(req,
                                                clientLeaderServer.hostname.get(),
                                                WLTOKEN_CLIENTLEADERREG_OPENDATABASE,
                                                TaskPriority::CoordinationReply);
        } else {
            wait(store(rep, clientLeaderServer.openDatabase.tryGetReply(req, TaskPriority::CoordinationReply)));
            repFuture = clientLeaderServer.openDatabase.tryGetReply(req, TaskPriority::CoordinationReply);
        }

        // We need to update the coordinator even if it hasn't changed in case we are establishing a new connection in
        // FlowTransport. If so, setting the coordinator here forces protocol version monitoring to restart with the new
        // peer object.
        //
        // Both the tryGetReply call and the creation of the ClientLeaderRegInterface above should result in the Peer
        // object being created in FlowTransport. Having this peer is a prerequisite to us signaling the AsyncVar.
        coordinator->setUnconditional(clientLeaderServer);

        state ErrorOr<CachedSerialization<ClientDBInfo>> rep = wait(repFuture);

        if (rep.present()) {
            if (rep.get().read().forward.present()) {
                TraceEvent("MonitorProxiesForwarding")
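The last hunk restructures the reply into a future that is started first, publishes the coordinator `AsyncVar` unconditionally second, and awaits the reply only third; as the in-code comment explains, issuing the request is what creates the `Peer` object that the publication step depends on. The ordering, reduced to standard futures (purely illustrative, no FoundationDB types):

#include <future>
#include <iostream>

int main() {
    // 1. Start the request; side effects of launching it happen now
    //    (in the real code: creating the FlowTransport peer).
    std::future<int> reply = std::async(std::launch::async, [] { return 42; });

    // 2. Publish state that depends on the request having been issued.
    std::cout << "coordinator published\n";

    // 3. Only now block on the reply.
    std::cout << "reply: " << reply.get() << "\n";
}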
@@ -158,6 +158,7 @@ ThreadFuture<MappedRangeResult> DLTransaction::getMappedRange(const KeySelectorR
                                                              const KeySelectorRef& end,
                                                              const StringRef& mapper,
                                                              GetRangeLimits limits,
                                                              int matchIndex,
                                                              bool snapshot,
                                                              bool reverse) {
    FdbCApi::FDBFuture* f = api->transactionGetMappedRange(tr,

@@ -175,6 +176,7 @@ ThreadFuture<MappedRangeResult> DLTransaction::getMappedRange(const KeySelectorR
                                                            limits.bytes,
                                                            FDB_STREAMING_MODE_EXACT,
                                                            0,
                                                            matchIndex,
                                                            snapshot,
                                                            reverse);
    return toThreadFuture<MappedRangeResult>(api, f, [](FdbCApi::FDBFuture* f, FdbCApi* api) {

@@ -971,10 +973,11 @@ ThreadFuture<MappedRangeResult> MultiVersionTransaction::getMappedRange(const Ke
                                                                        const KeySelectorRef& end,
                                                                        const StringRef& mapper,
                                                                        GetRangeLimits limits,
                                                                        int matchIndex,
                                                                        bool snapshot,
                                                                        bool reverse) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getMappedRange(begin, end, mapper, limits, snapshot, reverse)
    auto f = tr.transaction ? tr.transaction->getMappedRange(begin, end, mapper, limits, matchIndex, snapshot, reverse)
                            : makeTimeout<MappedRangeResult>();
    return abortableFuture(f, tr.onChange);
}

@@ -1105,13 +1108,13 @@ VersionVector MultiVersionTransaction::getVersionVector() {
    return VersionVector();
}

UID MultiVersionTransaction::getSpanID() {
SpanContext MultiVersionTransaction::getSpanContext() {
    auto tr = getTransaction();
    if (tr.transaction) {
        return tr.transaction->getSpanID();
        return tr.transaction->getSpanContext();
    }

    return UID();
    return SpanContext();
}

ThreadFuture<int64_t> MultiVersionTransaction::getApproximateSize() {
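The `getSpanID` to `getSpanContext` renames here and in the headers below are part of a wider migration from a bare UID to a span context for distributed tracing. A span context in the OpenTelemetry mold bundles identifiers with trace flags; a rough sketch of what such a type carries, as an illustration only and not FDB's exact definition:

#include <cstdint>

// Illustrative only: unlike a bare UID, a span context can carry the parent
// trace identity plus flags such as "sampled" across process boundaries.
struct SpanContext {
    uint64_t traceIdHigh = 0;
    uint64_t traceIdLow = 0;  // 128-bit trace ID, split into two words
    uint64_t spanId = 0;
    uint8_t flags = 0;        // bit 0: sampled
};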
@@ -1609,7 +1612,7 @@ void MultiVersionDatabase::DatabaseState::protocolVersionChanged(ProtocolVersion
    // When the protocol version changes, clear the corresponding entry in the shared state map
    // so it can be re-initialized. Only do so if there was a valid previous protocol version.
    if (dbProtocolVersion.present() && MultiVersionApi::apiVersionAtLeast(710)) {
        MultiVersionApi::api->clearClusterSharedStateMapEntry(clusterFilePath);
        MultiVersionApi::api->clearClusterSharedStateMapEntry(clusterFilePath, dbProtocolVersion.get());
    }

    dbProtocolVersion = protocolVersion;

@@ -1722,8 +1725,10 @@ void MultiVersionDatabase::DatabaseState::updateDatabase(Reference<IDatabase> ne
        }
    }
    if (db.isValid() && dbProtocolVersion.present() && MultiVersionApi::apiVersionAtLeast(710)) {
        auto updateResult = MultiVersionApi::api->updateClusterSharedStateMap(clusterFilePath, db);
        auto updateResult =
            MultiVersionApi::api->updateClusterSharedStateMap(clusterFilePath, dbProtocolVersion.get(), db);
        auto handler = mapThreadFuture<Void, Void>(updateResult, [this](ErrorOr<Void> result) {
            TraceEvent("ClusterSharedStateUpdated").detail("ClusterFilePath", clusterFilePath);
            dbVar->set(db);
            return ErrorOr<Void>(Void());
        });

@@ -2389,12 +2394,30 @@ void MultiVersionApi::updateSupportedVersions() {
    }
}

ThreadFuture<Void> MultiVersionApi::updateClusterSharedStateMap(std::string clusterFilePath, Reference<IDatabase> db) {
ThreadFuture<Void> MultiVersionApi::updateClusterSharedStateMap(std::string clusterFilePath,
                                                                ProtocolVersion dbProtocolVersion,
                                                                Reference<IDatabase> db) {
    MutexHolder holder(lock);
    if (clusterSharedStateMap.find(clusterFilePath) == clusterSharedStateMap.end()) {
        clusterSharedStateMap[clusterFilePath] = db->createSharedState();
        TraceEvent("CreatingClusterSharedState")
            .detail("ClusterFilePath", clusterFilePath)
            .detail("ProtocolVersion", dbProtocolVersion);
        clusterSharedStateMap[clusterFilePath] = { db->createSharedState(), dbProtocolVersion };
    } else {
        ThreadFuture<DatabaseSharedState*> entry = clusterSharedStateMap[clusterFilePath];
        auto& sharedStateInfo = clusterSharedStateMap[clusterFilePath];
        if (sharedStateInfo.protocolVersion != dbProtocolVersion) {
            // This situation should never happen, because we are connecting to the same cluster,
            // so the protocol version must be the same
            TraceEvent(SevError, "ClusterStateProtocolVersionMismatch")
                .detail("ClusterFilePath", clusterFilePath)
                .detail("ProtocolVersionExpected", dbProtocolVersion)
                .detail("ProtocolVersionFound", sharedStateInfo.protocolVersion);
            return Void();
        }
        TraceEvent("SettingClusterSharedState")
            .detail("ClusterFilePath", clusterFilePath)
            .detail("ProtocolVersion", dbProtocolVersion);
        ThreadFuture<DatabaseSharedState*> entry = sharedStateInfo.sharedStateFuture;
        return mapThreadFuture<DatabaseSharedState*, Void>(entry, [db](ErrorOr<DatabaseSharedState*> result) {
            if (result.isError()) {
                return ErrorOr<Void>(result.getError());

@@ -2407,16 +2430,29 @@ ThreadFuture<Void> MultiVersionApi::updateClusterSharedStateMap(std::string clus
    return Void();
}

void MultiVersionApi::clearClusterSharedStateMapEntry(std::string clusterFilePath) {
void MultiVersionApi::clearClusterSharedStateMapEntry(std::string clusterFilePath, ProtocolVersion dbProtocolVersion) {
    MutexHolder holder(lock);
    auto mapEntry = clusterSharedStateMap.find(clusterFilePath);
    // It can be that other database instances on the same cluster path are already upgraded and thus
    // have cleared or even created a new shared object entry
    if (mapEntry == clusterSharedStateMap.end()) {
        TraceEvent(SevError, "ClusterSharedStateMapEntryNotFound").detail("ClusterFilePath", clusterFilePath);
        TraceEvent("ClusterSharedStateMapEntryNotFound").detail("ClusterFilePath", clusterFilePath);
        return;
    }
    auto ssPtr = mapEntry->second.get();
    auto sharedStateInfo = mapEntry->second;
    if (sharedStateInfo.protocolVersion != dbProtocolVersion) {
        TraceEvent("ClusterSharedStateClearSkipped")
            .detail("ClusterFilePath", clusterFilePath)
            .detail("ProtocolVersionExpected", dbProtocolVersion)
            .detail("ProtocolVersionFound", sharedStateInfo.protocolVersion);
        return;
    }
    auto ssPtr = sharedStateInfo.sharedStateFuture.get();
    ssPtr->delRef(ssPtr);
    clusterSharedStateMap.erase(mapEntry);
    TraceEvent("ClusterSharedStateCleared")
        .detail("ClusterFilePath", clusterFilePath)
        .detail("ProtocolVersion", dbProtocolVersion);
}

std::vector<std::string> parseOptionValues(std::string valueStr) {
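Taken together, these hunks turn the shared-state registry from `clusterFilePath -> future` into `clusterFilePath -> { future, protocolVersion }`, so a stale clear issued by a database still on the old protocol cannot delete state that a newer instance has already recreated. The guarded check-then-act shape, reduced to standard C++ with simplified stand-in types (a sketch, not the MultiVersionApi implementation):

#include <cstdint>
#include <map>
#include <mutex>
#include <string>

struct SharedStateInfo {
    int sharedState = 0;          // stand-in for ThreadFuture<DatabaseSharedState*>
    uint64_t protocolVersion = 0;
};

std::mutex mapLock;
std::map<std::string, SharedStateInfo> registry;

// Erase only if the entry was created by the same protocol version; a
// mismatch means another instance already upgraded the entry, so skip.
bool clearEntry(const std::string& clusterFilePath, uint64_t protocolVersion) {
    std::lock_guard<std::mutex> holder(mapLock);
    auto it = registry.find(clusterFilePath);
    if (it == registry.end() || it->second.protocolVersion != protocolVersion)
        return false; // already cleared or already recreated: nothing to do
    registry.erase(it);
    return true;
}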
@@ -80,6 +80,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
     * and take the shortcut. */
    FDBGetRangeReqAndResult getRange;
    unsigned char buffer[32];
    bool boundaryAndExist;
} FDBMappedKeyValue;

#pragma pack(push, 4)

@@ -218,6 +219,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
                                             int targetBytes,
                                             FDBStreamingMode mode,
                                             int iteration,
                                             int matchIndex,
                                             fdb_bool_t snapshot,
                                             fdb_bool_t reverse);
    FDBFuture* (*transactionGetVersionstamp)(FDBTransaction* tr);

@@ -349,6 +351,7 @@ public:
                                                  const KeySelectorRef& end,
                                                  const StringRef& mapper,
                                                  GetRangeLimits limits,
                                                  int matchIndex,
                                                  bool snapshot,
                                                  bool reverse) override;
    ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) override;

@@ -378,7 +381,7 @@ public:
    ThreadFuture<Void> commit() override;
    Version getCommittedVersion() override;
    VersionVector getVersionVector() override;
    UID getSpanID() override { return UID(); };
    SpanContext getSpanContext() override { return SpanContext(); };
    ThreadFuture<int64_t> getApproximateSize() override;

    void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) override;

@@ -537,6 +540,7 @@ public:
                                                  const KeySelectorRef& end,
                                                  const StringRef& mapper,
                                                  GetRangeLimits limits,
                                                  int matchIndex,
                                                  bool snapshot,
                                                  bool reverse) override;
    ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) override;

@@ -567,7 +571,7 @@ public:
    ThreadFuture<Void> commit() override;
    Version getCommittedVersion() override;
    VersionVector getVersionVector() override;
    UID getSpanID() override;
    SpanContext getSpanContext() override;
    ThreadFuture<int64_t> getApproximateSize() override;

    void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) override;

@@ -861,8 +865,10 @@ public:

    bool callbackOnMainThread;
    bool localClientDisabled;
    ThreadFuture<Void> updateClusterSharedStateMap(std::string clusterFilePath, Reference<IDatabase> db);
    void clearClusterSharedStateMapEntry(std::string clusterFilePath);
    ThreadFuture<Void> updateClusterSharedStateMap(std::string clusterFilePath,
                                                   ProtocolVersion dbProtocolVersion,
                                                   Reference<IDatabase> db);
    void clearClusterSharedStateMapEntry(std::string clusterFilePath, ProtocolVersion dbProtocolVersion);

    static bool apiVersionAtLeast(int minVersion);

@@ -888,7 +894,11 @@ private:
    std::map<std::string, std::vector<Reference<ClientInfo>>> externalClients;
    // Map of clusterFilePath -> DatabaseSharedState pointer Future
    // Upon cluster version upgrade, clear the map entry for that cluster
    std::map<std::string, ThreadFuture<DatabaseSharedState*>> clusterSharedStateMap;
    struct SharedStateInfo {
        ThreadFuture<DatabaseSharedState*> sharedStateFuture;
        ProtocolVersion protocolVersion;
    };
    std::map<std::string, SharedStateInfo> clusterSharedStateMap;

    bool networkStartSetup;
    volatile bool networkSetup;
File diff suppressed because it is too large
@@ -243,7 +243,7 @@ struct TransactionState : ReferenceCounted<TransactionState> {

    Optional<UID> debugID;
    TaskPriority taskID;
    SpanID spanID;
    SpanContext spanContext;
    UseProvisionalProxies useProvisionalProxies = UseProvisionalProxies::False;
    bool readVersionObtainedFromGrvProxy;

@@ -259,13 +259,14 @@ struct TransactionState : ReferenceCounted<TransactionState> {
    std::shared_ptr<CoalescedKeyRangeMap<Value>> conflictingKeys;

    // Only available so that Transaction can have a default constructor, for use in state variables
    TransactionState(TaskPriority taskID, SpanID spanID) : taskID(taskID), spanID(spanID), tenantSet(false) {}
    TransactionState(TaskPriority taskID, SpanContext spanContext)
      : taskID(taskID), spanContext(spanContext), tenantSet(false) {}

    // VERSION_VECTOR changed default values of readVersionObtainedFromGrvProxy
    TransactionState(Database cx,
                     Optional<TenantName> tenant,
                     TaskPriority taskID,
                     SpanID spanID,
                     SpanContext spanContext,
                     Reference<TransactionLogInfo> trLogInfo);

    Reference<TransactionState> cloneAndReset(Reference<TransactionLogInfo> newTrLogInfo, bool generateNewSpan) const;

@@ -328,6 +329,7 @@ public:
                                              const KeySelector& end,
                                              const Key& mapper,
                                              GetRangeLimits limits,
                                              int matchIndex = MATCH_INDEX_ALL,
                                              Snapshot = Snapshot::False,
                                              Reverse = Reverse::False);
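The `Snapshot`/`Reverse` defaults in this signature use FoundationDB's strong-bool idiom: each flag is its own type, so positional arguments cannot be swapped silently, which matters now that `int matchIndex` sits next to them. A minimal hand-rolled version of the idiom (FoundationDB generates similar types via a macro; this sketch is illustrative):

// Distinct types instead of raw bools: getRange(..., Snapshot::True,
// Reverse::False) reads unambiguously, and swapping the two arguments
// is a compile error rather than a silent bug.
enum class Snapshot : bool { False = false, True = true };
enum class Reverse : bool { False = false, True = true };

void getRangeDemo(Snapshot snapshot, Reverse reverse) {
    if (snapshot == Snapshot::True) { /* read without adding read conflict ranges */ }
    if (reverse == Reverse::False) { /* ascending key order */ }
}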
@@ -337,6 +339,7 @@ private:
                                             const KeySelector& end,
                                             const Key& mapper,
                                             GetRangeLimits limits,
                                             int matchIndex,
                                             Snapshot snapshot,
                                             Reverse reverse);

@@ -435,7 +438,7 @@ public:

    void debugTransaction(UID dID) { trState->debugID = dID; }
    VersionVector getVersionVector() const;
    UID getSpanID() const { return trState->spanID; }
    SpanContext getSpanContext() const { return trState->spanContext; }

    Future<Void> commitMutations();
    void setupWatches();

@@ -447,7 +450,7 @@ public:
    Database getDatabase() const { return trState->cx; }
    static Reference<TransactionLogInfo> createTrLogInfoProbabilistically(const Database& cx);

    void setTransactionID(uint64_t id);
    void setTransactionID(UID id);
    void setToken(uint64_t token);

    const std::vector<Future<std::pair<Key, Key>>>& getExtraReadConflictRanges() const { return extraConflictRanges; }

@@ -490,7 +493,7 @@ private:
    Future<Void> committing;
};

ACTOR Future<Version> waitForCommittedVersion(Database cx, Version version, SpanID spanContext);
ACTOR Future<Version> waitForCommittedVersion(Database cx, Version version, SpanContext spanContext);
ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsList(Database cx,
                                                                                  KeyRange keys,
                                                                                  int shardLimit);
@@ -44,13 +44,18 @@ class CommitQuorum {
        } else if (failed >= ctis.size() / 2 + 1 && result.canBeSet()) {
            // Rollforwards could cause a version that didn't have quorum to
            // commit, so send commit_unknown_result instead of commit_failed.
            result.sendError(commit_unknown_result());

            // Calling sendError could delete this
            auto local = this->result;
            local.sendError(commit_unknown_result());
        } else {
            // Check if it is possible to ever receive quorum agreement
            auto totalRequestsOutstanding = ctis.size() - (failed + successful + maybeCommitted);
            if ((failed + totalRequestsOutstanding < ctis.size() / 2 + 1) &&
                (successful + totalRequestsOutstanding < ctis.size() / 2 + 1) && result.canBeSet()) {
                result.sendError(commit_unknown_result());
                // Calling sendError could delete this
                auto local = this->result;
                local.sendError(commit_unknown_result());
            }
        }
    }
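This fix (repeated for `GetGenerationQuorum` below) addresses a classic lifetime hazard: `sendError` runs continuations synchronously, and a continuation may destroy the object that owns `result`, leaving the member destroyed while its own method is still executing. Copying the promise into a local first keeps the underlying state alive for the duration of the call. The hazard and the fix, sketched with a ref-counted stand-in for Flow's `Promise` (illustrative types only):

#include <functional>
#include <memory>

// A toy promise: sending an error invokes an arbitrary continuation, which
// in real code may drop the last reference to the object holding `result`.
struct Promise {
    std::shared_ptr<std::function<void(int)>> callback;
    void sendError(int err) const { (*callback)(err); }
};

struct Quorum {
    Promise result;
    void fail(int err) {
        // Unsafe variant: result.sendError(err); the continuation might
        // delete this Quorum, destroying `result` mid-call.
        auto local = result;  // the copy shares the callback and outlives *this
        local.sendError(err); // safe even if the continuation frees the Quorum
    }
};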
@@ -112,7 +117,7 @@ public:
        }
        return result.getFuture();
    }
    bool committed() const { return result.isSet(); }
    bool committed() const { return result.isSet() && !result.isError(); }
};

class GetGenerationQuorum {

@@ -155,7 +160,9 @@ class GetGenerationQuorum {
            } else if (self->maxAgreement + (self->ctis.size() - self->totalRepliesReceived) <
                       (self->ctis.size() / 2 + 1)) {
                if (!self->result.isError()) {
                    self->result.sendError(failed_to_reach_quorum());
                    // Calling sendError could delete self
                    auto local = self->result;
                    local.sendError(failed_to_reach_quorum());
                }
            }
            break;

@@ -166,7 +173,9 @@ class GetGenerationQuorum {
                ++self->totalRepliesReceived;
                if (self->totalRepliesReceived == self->ctis.size() && self->result.canBeSet() &&
                    !self->result.isError()) {
                    self->result.sendError(failed_to_reach_quorum());
                    // Calling sendError could delete self
                    auto local = self->result;
                    local.sendError(failed_to_reach_quorum());
                }
                break;
            } else {

@@ -409,11 +418,11 @@ public:

    PaxosConfigTransactionImpl(Database const& cx) : cx(cx) {
        const ClusterConnectionString& cs = cx->getConnectionRecord()->getConnectionString();
        ctis.reserve(cs.hostnames.size() + cs.coordinators().size());
        ctis.reserve(cs.hostnames.size() + cs.coords.size());
        for (const auto& h : cs.hostnames) {
            ctis.emplace_back(h);
        }
        for (const auto& c : cs.coordinators()) {
        for (const auto& c : cs.coords) {
            ctis.emplace_back(c);
        }
        getGenerationQuorum = GetGenerationQuorum{ ctis };

@@ -54,6 +54,7 @@ public:
                                              KeySelector end,
                                              Key mapper,
                                              GetRangeLimits limits,
                                              int matchIndex = MATCH_INDEX_ALL,
                                              Snapshot = Snapshot::False,
                                              Reverse = Reverse::False) override {
        throw client_invalid_operation();
Some files were not shown because too many files have changed in this diff