/*
 * fdbcli.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
2017-10-04 11:57:39 +08:00
|
|
|
|
#include "boost/lexical_cast.hpp"
|
2019-02-18 07:41:16 +08:00
|
|
|
|
#include "fdbclient/NativeAPI.actor.h"
|
2020-09-10 02:54:58 +08:00
|
|
|
|
#include "fdbclient/FDBTypes.h"
|
2021-03-25 00:33:20 +08:00
|
|
|
|
#include "fdbclient/IClientApi.h"
|
|
|
|
|
#include "fdbclient/MultiVersionTransaction.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include "fdbclient/Status.h"
|
2021-06-08 03:54:24 +08:00
|
|
|
|
#include "fdbclient/KeyBackedTypes.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include "fdbclient/StatusClient.h"
|
|
|
|
|
#include "fdbclient/DatabaseContext.h"
|
2021-02-24 08:17:05 +08:00
|
|
|
|
#include "fdbclient/GlobalConfig.actor.h"
|
2021-06-03 14:40:52 +08:00
|
|
|
|
#include "fdbclient/IKnobCollection.h"
|
2019-02-18 07:41:16 +08:00
|
|
|
|
#include "fdbclient/NativeAPI.actor.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include "fdbclient/ReadYourWrites.h"
|
|
|
|
|
#include "fdbclient/ClusterInterface.h"
|
2019-02-18 09:38:13 +08:00
|
|
|
|
#include "fdbclient/ManagementAPI.actor.h"
|
2018-08-17 08:34:59 +08:00
|
|
|
|
#include "fdbclient/Schemas.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include "fdbclient/CoordinationInterface.h"
|
|
|
|
|
#include "fdbclient/FDBOptions.g.h"
|
2021-08-11 09:07:36 +08:00
|
|
|
|
#include "fdbclient/TagThrottle.actor.h"
|
2021-05-31 02:51:47 +08:00
|
|
|
|
#include "fdbclient/Tuple.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2021-04-23 16:32:30 +08:00
|
|
|
|
#include "fdbclient/ThreadSafeTransaction.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include "flow/DeterministicRandom.h"
|
2020-02-29 06:56:10 +08:00
|
|
|
|
#include "flow/Platform.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2020-03-05 12:14:47 +08:00
|
|
|
|
#include "flow/TLSConfig.actor.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include "flow/SimpleOpt.h"
|
|
|
|
|
|
2018-10-20 01:30:13 +08:00
|
|
|
|
#include "fdbcli/FlowLineNoise.h"
|
2021-05-12 05:31:08 +08:00
|
|
|
|
#include "fdbcli/fdbcli.actor.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2019-05-05 02:28:29 +08:00
|
|
|
|
#include <cinttypes>
|
2019-04-02 02:40:26 +08:00
|
|
|
|
#include <type_traits>
|
2017-05-26 04:48:44 +08:00
|
|
|
|
#include <signal.h>
|
|
|
|
|
|
|
|
|
|
#ifdef __unixish__
|
|
|
|
|
#include <stdio.h>
|
|
|
|
|
#include "fdbcli/linenoise/linenoise.h"
|
|
|
|
|
#endif
|
|
|
|
|
|
2020-05-21 04:23:02 +08:00
|
|
|
|
#include "fdbclient/versions.h"
|
2020-09-11 04:54:33 +08:00
|
|
|
|
#include "fdbclient/BuildFlags.h"
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
#include "flow/actorcompiler.h" // This must be the last #include.
|
2018-08-23 00:40:45 +08:00
|
|
|
|
|
2021-05-12 03:08:48 +08:00
|
|
|
|
// Client API version that fdbcli requests from the FDB client library.
#define FDB_API_VERSION 710

/*
 * While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi
 * instance (e.g. from ThreadSafeApi)
 */
#define API ((IClientApi*)MultiVersionApi::api)
|
|
|
|
|
|
2019-11-16 04:26:51 +08:00
|
|
|
|
// Defined elsewhere in the build (versions target); returns the source version string of this build.
extern const char* getSourceVersion();

// Names of all valid (documented) transaction options; used for interactive completion/validation.
std::vector<std::string> validOptions;
|
|
|
|
|
|
2019-07-01 01:24:55 +08:00
|
|
|
|
// Identifiers for the command-line switches parsed by CSimpleOpt.
// The switch spellings that map to each identifier are listed in g_rgOptions.
enum {
	OPT_CONNFILE, // -C / --cluster_file <CONNFILE>
	OPT_DATABASE, // -d <DATABASE>
	OPT_HELP, // -? / -h / --help
	OPT_TRACE, // --log
	OPT_TRACE_DIR, // --log-dir <PATH>
	OPT_TIMEOUT, // --timeout <T>
	OPT_EXEC, // --exec <CMDS>
	OPT_NO_STATUS, // --no-status
	OPT_NO_HINTS, // --no-hints
	OPT_STATUS_FROM_JSON, // --status-from-json <FILE>
	OPT_VERSION, // -v / --version
	OPT_BUILD_FLAGS, // --build_flags
	OPT_TRACE_FORMAT, // --trace_format <FORMAT>
	OPT_KNOB, // --knob_<KNOBNAME> <KNOBVALUE>
	OPT_DEBUG_TLS // --debug-tls
};
|
|
|
|
|
|
|
|
|
|
// Table mapping each command-line switch string to its option identifier, consumed by CSimpleOpt.
// SO_REQ_SEP switches require a separate argument; SO_NONE switches are bare flags.
CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
	                                  { OPT_CONNFILE, "--cluster_file", SO_REQ_SEP },
	                                  { OPT_DATABASE, "-d", SO_REQ_SEP },
	                                  { OPT_TRACE, "--log", SO_NONE },
	                                  { OPT_TRACE_DIR, "--log-dir", SO_REQ_SEP },
	                                  { OPT_TIMEOUT, "--timeout", SO_REQ_SEP },
	                                  { OPT_EXEC, "--exec", SO_REQ_SEP },
	                                  { OPT_NO_STATUS, "--no-status", SO_NONE },
	                                  { OPT_NO_HINTS, "--no-hints", SO_NONE },
	                                  { OPT_HELP, "-?", SO_NONE },
	                                  { OPT_HELP, "-h", SO_NONE },
	                                  { OPT_HELP, "--help", SO_NONE },
	                                  { OPT_STATUS_FROM_JSON, "--status-from-json", SO_REQ_SEP },
	                                  { OPT_VERSION, "--version", SO_NONE },
	                                  { OPT_VERSION, "-v", SO_NONE },
	                                  { OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
	                                  { OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
	                                  { OPT_KNOB, "--knob_", SO_REQ_SEP },
	                                  { OPT_DEBUG_TLS, "--debug-tls", SO_NONE },

#ifndef TLS_DISABLED
	                                  TLS_OPTION_FLAGS
#endif

	                                  SO_END_OF_OPTIONS };
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
// Prints `text' to stdout, word-wrapped so that no output line exceeds `col' characters.
// Lines are broken at the most recent space or newline; a single word longer than `col'
// is hard-broken at the column limit.
void printAtCol(const char* text, int col) {
	const char* iter = text;
	const char* start = text; // first character of the line currently being built
	const char* space = nullptr; // most recent break candidate (space, newline, or NUL)

	do {
		iter++;
		if (*iter == '\n' || *iter == ' ' || *iter == '\0')
			space = iter;
		if (*iter == '\n' || *iter == '\0' || (iter - start == col)) {
			if (!space) // no break candidate seen: hard-break at the column boundary
				space = iter;
			printf("%.*s\n", (int)(space - start), start);
			start = space;
			// Don't begin the next line with the separator character itself.
			if (*start == ' ' || *start == '\n')
				start++;
			space = nullptr;
		}
	} while (*iter);
}
|
|
|
|
|
|
|
|
|
|
std::string lineWrap(const char* text, int col) {
|
|
|
|
|
const char* iter = text;
|
|
|
|
|
const char* start = text;
|
2020-08-19 05:30:20 +08:00
|
|
|
|
const char* space = nullptr;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string out = "";
|
|
|
|
|
do {
|
|
|
|
|
iter++;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (*iter == '\n' || *iter == ' ' || *iter == '\0')
|
|
|
|
|
space = iter;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (*iter == '\n' || *iter == '\0' || (iter - start == col)) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!space)
|
|
|
|
|
space = iter;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
out += format("%.*s\n", (int)(space - start), start);
|
|
|
|
|
start = space;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (*start == ' ' /* || *start == '\n'*/)
|
|
|
|
|
start++;
|
2020-08-19 05:30:20 +08:00
|
|
|
|
space = nullptr;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
} while (*iter);
|
|
|
|
|
return out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Tracks the set of transaction options enabled in the current fdbcli session and
// applies them to transactions as they are created. Option names are resolved via a
// string -> FDBTransactionOptions::Option map built in the constructor.
class FdbOptions {
public:
	// Prints an error and throws invalid_option or invalid_option_value if the option could not be set
	void setOption(Reference<ReadYourWritesTransaction> tr,
	               StringRef optionStr,
	               bool enabled,
	               Optional<StringRef> arg,
	               bool intrans) {
		auto transactionItr = transactionOptions.legalOptions.find(optionStr.toString());
		if (transactionItr != transactionOptions.legalOptions.end())
			setTransactionOption(tr, transactionItr->second, enabled, arg, intrans);
		else {
			fprintf(stderr,
			        "ERROR: invalid option '%s'. Try `help options' for a list of available options.\n",
			        optionStr.toString().c_str());
			throw invalid_option();
		}
	}

	// Applies all enabled transaction options to the given transaction
	void apply(Reference<ReadYourWritesTransaction> tr) {
		for (const auto& [name, value] : transactionOptions.options) {
			tr->setOption(name, value.castTo<StringRef>());
		}
	}

	// TODO: replace the above function after we refactor all fdbcli code
	void apply(Reference<ITransaction> tr) {
		for (const auto& [name, value] : transactionOptions.options) {
			tr->setOption(name, value.castTo<StringRef>());
		}
	}

	// Returns true if any options have been set
	bool hasAnyOptionsEnabled() const { return !transactionOptions.options.empty(); }

	// Prints a list of enabled options, along with their parameters (if any)
	void print() const {
		bool found = false;
		found = found || transactionOptions.print();

		if (!found)
			printf("There are no options enabled\n");
	}

	// Returns a vector of the names of all documented options
	std::vector<std::string> getValidOptions() const { return transactionOptions.getValidOptions(); }

	// Prints the help string obtained by invoking `help options'
	void printHelpString() const { transactionOptions.printHelpString(); }

private:
	// Sets a transaction option. If intrans == true, then this option is also applied to the passed in transaction.
	void setTransactionOption(Reference<ReadYourWritesTransaction> tr,
	                          FDBTransactionOptions::Option option,
	                          bool enabled,
	                          Optional<StringRef> arg,
	                          bool intrans) {
		// The presence of an argument must match whether the option declares a parameter.
		if (enabled && arg.present() != FDBTransactionOptions::optionInfo.getMustExist(option).hasParameter) {
			fprintf(stderr, "ERROR: option %s a parameter\n", arg.present() ? "did not expect" : "expected");
			throw invalid_option_value();
		}

		if (intrans)
			tr->setOption(option, arg);

		// Record the option so it is re-applied to future transactions as well.
		transactionOptions.setOption(option, enabled, arg.castTo<StringRef>());
	}

	// A group of enabled options (of type T::Option) as well as a legal options map from string to T::Option
	template <class T>
	struct OptionGroup {
		std::map<typename T::Option, Optional<Standalone<StringRef>>> options; // currently-enabled options and their args
		std::map<std::string, typename T::Option> legalOptions; // option name -> option identifier

		OptionGroup<T>() {}
		OptionGroup<T>(OptionGroup<T>& base)
		  : options(base.options.begin(), base.options.end()), legalOptions(base.legalOptions) {}

		// Enable or disable an option. Returns true if option value changed
		bool setOption(typename T::Option option, bool enabled, Optional<StringRef> arg) {
			auto optionItr = options.find(option);
			// Enabling is a change if the option was absent or had a different argument.
			if (enabled && (optionItr == options.end() ||
			                Optional<Standalone<StringRef>>(optionItr->second).castTo<StringRef>() != arg)) {
				options[option] = arg.castTo<Standalone<StringRef>>();
				return true;
			} else if (!enabled && optionItr != options.end()) {
				options.erase(optionItr);
				return true;
			}

			return false;
		}

		// Prints a list of all enabled options in this group
		bool print() const {
			bool found = false;

			for (auto itr = legalOptions.begin(); itr != legalOptions.end(); ++itr) {
				auto optionItr = options.find(itr->second);
				if (optionItr != options.end()) {
					if (optionItr->second.present())
						printf("%s: `%s'\n", itr->first.c_str(), formatStringRef(optionItr->second.get()).c_str());
					else
						printf("%s\n", itr->first.c_str());

					found = true;
				}
			}

			return found;
		}

		// Returns true if the specified option is documented
		// (has a non-empty comment that does not start with "Deprecated").
		bool isDocumented(typename T::Option option) const {
			FDBOptionInfo info = T::optionInfo.getMustExist(option);

			std::string deprecatedStr = "Deprecated";
			return !info.comment.empty() && info.comment.substr(0, deprecatedStr.size()) != deprecatedStr;
		}

		// Returns a vector of the names of all documented options
		std::vector<std::string> getValidOptions() const {
			std::vector<std::string> ret;

			for (auto itr = legalOptions.begin(); itr != legalOptions.end(); ++itr)
				if (isDocumented(itr->second))
					ret.push_back(itr->first);

			return ret;
		}

		// Prints a help string for each option in this group. Any options with no comment
		// are excluded from this help string. Lines are wrapped to 80 characters.
		void printHelpString() const {
			for (auto itr = legalOptions.begin(); itr != legalOptions.end(); ++itr) {
				if (isDocumented(itr->second)) {
					FDBOptionInfo info = T::optionInfo.getMustExist(itr->second);
					std::string helpStr = info.name + " - " + info.comment;
					if (info.hasParameter)
						helpStr += " " + info.parameterComment;
					helpStr += "\n";

					printAtCol(helpStr.c_str(), 80);
				}
			}
		}
	};

	OptionGroup<FDBTransactionOptions> transactionOptions;

public:
	FdbOptions() {
		// Build the name -> option map from the generated option metadata.
		for (auto itr = FDBTransactionOptions::optionInfo.begin(); itr != FDBTransactionOptions::optionInfo.end();
		     ++itr)
			transactionOptions.legalOptions[itr->second.name] = itr->first;
	}

	FdbOptions(FdbOptions& base) : transactionOptions(base.transactionOptions) {}
};
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Renders `item' as printable ASCII. Bytes outside the printable range are emitted as
// \xNN escapes. With fullEscaping, backslashes and double quotes are backslash-escaped
// and spaces are also rendered as \xNN escapes.
static std::string formatStringRef(StringRef item, bool fullEscaping = false) {
	std::string result;

	for (int idx = 0; idx < item.size(); idx++) {
		const auto byte = item[idx];
		if (fullEscaping && byte == '\\') {
			result += "\\\\";
		} else if (fullEscaping && byte == '"') {
			result += "\\\"";
		} else if (byte >= 32 && byte < 127 && !(fullEscaping && byte == ' ')) {
			// Printable character passed through verbatim.
			result += byte;
		} else {
			// Non-printable byte (or a space under full escaping): hex escape.
			result += format("\\x%02x", byte);
		}
	}

	return result;
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Tokenizes a command line into commands (separated by unquoted `;') and tokens
// (separated by unquoted spaces). Double quotes group text into one token and are
// removed; backslash escapes (\", \\, \ , \;, \xNN) are resolved by rewriting
// `line' in place, so the returned StringRefs point into the (modified) `line'.
// Sets `err' on a malformed escape sequence and `partial' on an unterminated quote.
static std::vector<std::vector<StringRef>> parseLine(std::string& line, bool& err, bool& partial) {
	err = false;
	partial = false;

	bool quoted = false;
	std::vector<StringRef> buf; // tokens of the command currently being parsed
	std::vector<std::vector<StringRef>> ret;

	size_t i = line.find_first_not_of(' ');
	size_t offset = i; // start of the current token within `line'

	// Set after a closing quote so that an empty quoted string ("") still yields a token.
	bool forcetoken = false;

	while (i <= line.length()) {
		switch (line[i]) {
		case ';':
			if (!quoted) {
				// End of a command: flush the current token (if any) and the token list.
				if (i > offset || (forcetoken && i == offset))
					buf.push_back(StringRef((uint8_t*)(line.data() + offset), i - offset));
				ret.push_back(std::move(buf));
				offset = i = line.find_first_not_of(' ', i + 1);
				forcetoken = false;
			} else
				i++;
			break;
		case '"':
			// Quotes are stripped from the line; text inside stays part of one token.
			quoted = !quoted;
			line.erase(i, 1);
			forcetoken = true;
			break;
		case ' ':
			if (!quoted) {
				// End of a token.
				if (i > offset || (forcetoken && i == offset))
					buf.push_back(StringRef((uint8_t*)(line.data() + offset), i - offset));
				offset = i = line.find_first_not_of(' ', i);
				forcetoken = false;
			} else
				i++;
			break;
		case '\\':
			// An escape must be followed by at least one more character.
			if (i + 2 > line.length()) {
				err = true;
				ret.push_back(std::move(buf));
				return ret;
			}
			switch (line[i + 1]) {
				char ent, save;
			case '"':
			case '\\':
			case ' ':
			case ';':
				// Simple escape: drop the backslash, keep the escaped character literal.
				line.erase(i, 1);
				break;
			case 'x':
				// \xNN hex escape: needs two hex digits after the "\x".
				if (i + 4 > line.length()) {
					err = true;
					ret.push_back(std::move(buf));
					return ret;
				}
				char* pEnd;
				// Temporarily NUL-terminate so strtoul reads exactly two digits.
				save = line[i + 4];
				line[i + 4] = 0;
				ent = char(strtoul(line.data() + i + 2, &pEnd, 16));
				if (*pEnd) {
					err = true;
					ret.push_back(std::move(buf));
					return ret;
				}
				line[i + 4] = save;
				// Replace the 4-character escape with the single decoded byte.
				line.replace(i, 4, 1, ent);
				break;
			default:
				err = true;
				ret.push_back(std::move(buf));
				return ret;
			}
			// Intentional fallthrough: advance past the (now unescaped) character.
		default:
			i++;
		}
	}

	// i is one past the terminating NUL here; step back to it before flushing.
	i -= 1;
	if (i > offset || (forcetoken && i == offset))
		buf.push_back(StringRef((uint8_t*)(line.data() + offset), i - offset));

	ret.push_back(std::move(buf));

	// An unterminated quote means the user can continue the command on the next line.
	if (quoted)
		partial = true;

	return ret;
}
|
|
|
|
|
|
|
|
|
|
static void printProgramUsage(const char* name) {
|
|
|
|
|
printf("FoundationDB CLI " FDB_VT_PACKAGE_NAME " (v" FDB_VT_VERSION ")\n"
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"usage: %s [OPTIONS]\n"
|
|
|
|
|
"\n",
|
|
|
|
|
name);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf(" -C CONNFILE The path of a file containing the connection string for the\n"
|
2021-03-11 02:06:03 +08:00
|
|
|
|
" FoundationDB cluster. The default is first the value of the\n"
|
|
|
|
|
" FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',\n"
|
|
|
|
|
" then `%s'.\n",
|
|
|
|
|
platform::getDefaultClusterFilePath().c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf(" --log Enables trace file logging for the CLI session.\n"
|
2019-07-01 01:24:55 +08:00
|
|
|
|
" --log-dir PATH Specifes the output directory for trace files. If\n"
|
|
|
|
|
" unspecified, defaults to the current directory. Has\n"
|
|
|
|
|
" no effect unless --log is specified.\n"
|
|
|
|
|
" --trace_format FORMAT\n"
|
|
|
|
|
" Select the format of the log files. xml (the default) and json\n"
|
|
|
|
|
" are supported. Has no effect unless --log is specified.\n"
|
|
|
|
|
" --exec CMDS Immediately executes the semicolon separated CLI commands\n"
|
|
|
|
|
" and then exits.\n"
|
|
|
|
|
" --no-status Disables the initial status check done when starting\n"
|
|
|
|
|
" the CLI.\n"
|
2018-06-27 03:08:32 +08:00
|
|
|
|
#ifndef TLS_DISABLED
|
2019-07-01 01:24:55 +08:00
|
|
|
|
TLS_HELP
|
2018-06-21 00:21:23 +08:00
|
|
|
|
#endif
|
2020-03-05 03:15:32 +08:00
|
|
|
|
" --knob_KNOBNAME KNOBVALUE\n"
|
|
|
|
|
" Changes a knob option. KNOBNAME should be lowercase.\n"
|
2021-03-11 02:06:03 +08:00
|
|
|
|
" --debug-tls Prints the TLS configuration and certificate chain, then exits.\n"
|
|
|
|
|
" Useful in reporting and diagnosing TLS issues.\n"
|
2020-09-11 04:54:33 +08:00
|
|
|
|
" --build_flags Print build information and exit.\n"
|
2019-07-01 01:24:55 +08:00
|
|
|
|
" -v, --version Print FoundationDB CLI version information and exit.\n"
|
|
|
|
|
" -h, --help Display this help and exit.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Help-text footers pointing users at `help escaping' for key (and key/value) escaping rules.
#define ESCAPINGK "\n\nFor information on escaping keys, type `help escaping'."
#define ESCAPINGKV "\n\nFor information on escaping keys and values, type `help escaping'."
|
|
|
|
|
|
2021-03-30 03:51:32 +08:00
|
|
|
|
using namespace fdb_cli;
// Registry of command help entries; populated by CommandFactory registration and initHelp().
std::map<std::string, CommandHelp>& helpMap = CommandFactory::commands();
// Commands that exist but are excluded from the help listing.
std::set<std::string>& hiddenCommands = CommandFactory::hiddenCommands();
|
2021-03-25 00:33:20 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
void initHelp() {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
helpMap["begin"] =
|
|
|
|
|
CommandHelp("begin",
|
|
|
|
|
"begin a new transaction",
|
|
|
|
|
"By default, the fdbcli operates in autocommit mode. All operations are performed in their own "
|
|
|
|
|
"transaction, and are automatically committed for you. By explicitly beginning a transaction, "
|
|
|
|
|
"successive operations are all performed as part of a single transaction.\n\nTo commit the "
|
|
|
|
|
"transaction, use the commit command. To discard the transaction, use the reset command.");
|
|
|
|
|
helpMap["commit"] = CommandHelp("commit",
|
|
|
|
|
"commit the current transaction",
|
|
|
|
|
"Any sets or clears executed after the start of the current transaction will be "
|
|
|
|
|
"committed to the database. On success, the committed version number is displayed. "
|
|
|
|
|
"If commit fails, the error is displayed and the transaction must be retried.");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["clear"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"clear <KEY>",
|
|
|
|
|
"clear a key from the database",
|
|
|
|
|
"Clear succeeds even if the specified key is not present, but may fail because of conflicts." ESCAPINGK);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["clearrange"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"clearrange <BEGINKEY> <ENDKEY>",
|
|
|
|
|
"clear a range of keys from the database",
|
|
|
|
|
"All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will "
|
|
|
|
|
"succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["configure"] = CommandHelp(
|
2021-06-03 06:23:29 +08:00
|
|
|
|
"configure [new|tss]"
|
2020-10-01 04:26:01 +08:00
|
|
|
|
"<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|"
|
2021-05-11 08:05:08 +08:00
|
|
|
|
"commit_proxies=<COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*|"
|
2021-06-03 06:23:29 +08:00
|
|
|
|
"count=<TSS_COUNT>|perpetual_storage_wiggle=<WIGGLE_SPEED>",
|
2020-08-06 15:01:57 +08:00
|
|
|
|
"change the database configuration",
|
|
|
|
|
"The `new' option, if present, initializes a new database with the given configuration rather than changing "
|
|
|
|
|
"the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "
|
2021-03-06 03:28:15 +08:00
|
|
|
|
"specified.\n\ntss: when enabled, configures the testing storage server for the cluster instead."
|
|
|
|
|
"When used with new to set up tss for the first time, it requires both a count and a storage engine."
|
|
|
|
|
"To disable the testing storage server, run \"configure tss count=0\"\n\n"
|
|
|
|
|
"Redundancy mode:\n single - one copy of the data. Not fault tolerant.\n double - two copies "
|
2020-08-06 15:01:57 +08:00
|
|
|
|
"of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - "
|
|
|
|
|
"See the Admin Guide.\n three_datacenter - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage "
|
|
|
|
|
"engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small "
|
2020-10-01 04:26:01 +08:00
|
|
|
|
"datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. The proxy role is being "
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"deprecated and split into GRV proxy and Commit proxy, now prefer configure 'grv_proxies' and 'commit_proxies' "
|
|
|
|
|
"separately. Generally we should follow that 'commit_proxies' is three times of 'grv_proxies' and "
|
|
|
|
|
"'grv_proxies' "
|
|
|
|
|
"should be not more than 4. If 'proxies' is specified, it will be converted to 'grv_proxies' and "
|
|
|
|
|
"'commit_proxies'. "
|
|
|
|
|
"Must be at least 2 (1 GRV proxy, 1 Commit proxy), or set to -1 which restores the number of proxies to the "
|
|
|
|
|
"default value.\n\ncommit_proxies=<COMMIT_PROXIES>: Sets the desired number of commit proxies in the cluster. "
|
2020-10-01 04:26:01 +08:00
|
|
|
|
"Must be at least 1, or set to -1 which restores the number of commit proxies to the default "
|
2020-09-16 13:29:49 +08:00
|
|
|
|
"value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the desired number of GRV proxies in the cluster. Must be at least "
|
|
|
|
|
"1, or set to -1 which restores the number of GRV proxies to the default value.\n\nlogs=<LOGS>: Sets the "
|
|
|
|
|
"desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of "
|
|
|
|
|
"logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. "
|
2021-05-11 08:05:08 +08:00
|
|
|
|
"Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\n"
|
|
|
|
|
"perpetual_storage_wiggle=<WIGGLE_SPEED>: Set the value speed (a.k.a., the number of processes that the Data "
|
|
|
|
|
"Distributor should wiggle at a time). Currently, only 0 and 1 are supported. The value 0 means to disable the "
|
|
|
|
|
"perpetual storage wiggle.\n\n"
|
|
|
|
|
"See the FoundationDB Administration Guide for more information.");
|
2018-08-17 08:34:59 +08:00
|
|
|
|
helpMap["fileconfigure"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"fileconfigure [new] <FILENAME>",
|
|
|
|
|
"change the database configuration from a file",
|
|
|
|
|
"The `new' option, if present, initializes a new database with the given configuration rather than changing "
|
|
|
|
|
"the configuration of an existing one. Load a JSON document from the provided file, and change the database "
|
|
|
|
|
"configuration to match the contents of the JSON document. The format should be the same as the value of the "
|
|
|
|
|
"\"configuration\" entry in status JSON without \"excluded_servers\" or \"coordinators_count\".");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["coordinators"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"coordinators auto|<ADDRESS>+ [description=new_cluster_description]",
|
|
|
|
|
"change cluster coordinators or description",
|
|
|
|
|
"If 'auto' is specified, coordinator addresses will be choosen automatically to support the configured "
|
|
|
|
|
"redundancy level. (If the current set of coordinators are healthy and already support the redundancy level, "
|
|
|
|
|
"nothing will be changed.)\n\nOtherwise, sets the coordinators to the list of IP:port pairs specified by "
|
|
|
|
|
"<ADDRESS>+. An fdbserver process must be running on each of the specified addresses.\n\ne.g. coordinators "
|
|
|
|
|
"10.0.0.1:4000 10.0.0.2:4000 10.0.0.3:4000\n\nIf 'description=desc' is specified then the description field in "
|
|
|
|
|
"the cluster\nfile is changed to desc, which must match [A-Za-z0-9_]+.");
|
|
|
|
|
helpMap["exclude"] = CommandHelp(
|
2021-06-24 04:55:17 +08:00
|
|
|
|
"exclude [FORCE] [failed] [no_wait] [<ADDRESS...>] [locality_dcid:<excludedcid>] "
|
|
|
|
|
"[locality_zoneid:<excludezoneid>] [locality_machineid:<excludemachineid>] "
|
|
|
|
|
"[locality_processid:<excludeprocessid>] or any locality data",
|
2021-06-05 06:23:04 +08:00
|
|
|
|
"exclude servers from the database either with IP address match or locality match",
|
|
|
|
|
"If no addresses or locaities are specified, lists the set of excluded addresses and localities."
|
2021-06-24 04:55:17 +08:00
|
|
|
|
"\n\nFor each IP address or IP:port pair in <ADDRESS...> or any LocalityData attributes (like dcid, zoneid, "
|
2021-06-05 06:23:04 +08:00
|
|
|
|
"machineid, processid), adds the address/locality to the set of excluded servers and localities then waits "
|
|
|
|
|
"until all database state has been safely moved away from the specified servers. If 'no_wait' is set, the "
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"command returns \nimmediately without checking if the exclusions have completed successfully.\n"
|
|
|
|
|
"If 'FORCE' is set, the command does not perform safety checks before excluding.\n"
|
|
|
|
|
"If 'failed' is set, the transaction log queue is dropped pre-emptively before waiting\n"
|
|
|
|
|
"for data movement to finish and the server cannot be included again.");
|
2021-06-05 06:23:04 +08:00
|
|
|
|
helpMap["include"] = CommandHelp(
|
2021-06-24 04:55:17 +08:00
|
|
|
|
"include all|[<ADDRESS...>] [locality_dcid:<excludedcid>] [locality_zoneid:<excludezoneid>] "
|
|
|
|
|
"[locality_machineid:<excludemachineid>] [locality_processid:<excludeprocessid>] or any locality data",
|
2021-06-05 06:23:04 +08:00
|
|
|
|
"permit previously-excluded servers and localities to rejoin the database",
|
|
|
|
|
"If `all' is specified, the excluded servers and localities list is cleared.\n\nFor each IP address or IP:port "
|
2021-06-24 04:55:17 +08:00
|
|
|
|
"pair in <ADDRESS...> or any LocalityData (like dcid, zoneid, machineid, processid), removes any "
|
2021-06-05 06:23:04 +08:00
|
|
|
|
"matching exclusions from the excluded servers and localities list. "
|
|
|
|
|
"(A specified IP will match all IP:* exclusion entries)");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
helpMap["status"] =
|
|
|
|
|
CommandHelp("status [minimal|details|json]",
|
|
|
|
|
"get the status of a FoundationDB cluster",
|
|
|
|
|
"If the cluster is down, this command will print a diagnostic which may be useful in figuring out "
|
|
|
|
|
"what is wrong. If the cluster is running, this command will print cluster "
|
|
|
|
|
"statistics.\n\nSpecifying `minimal' will provide a minimal description of the status of your "
|
|
|
|
|
"database.\n\nSpecifying `details' will provide load information for individual "
|
|
|
|
|
"workers.\n\nSpecifying `json' will provide status information in a machine readable JSON format.");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["exit"] = CommandHelp("exit", "exit the CLI", "");
|
|
|
|
|
helpMap["quit"] = CommandHelp();
|
|
|
|
|
helpMap["waitconnected"] = CommandHelp();
|
|
|
|
|
helpMap["waitopen"] = CommandHelp();
|
2021-03-11 02:06:03 +08:00
|
|
|
|
helpMap["sleep"] = CommandHelp("sleep <SECONDS>", "sleep for a period of time", "");
|
|
|
|
|
helpMap["get"] =
|
|
|
|
|
CommandHelp("get <KEY>",
|
|
|
|
|
"fetch the value for a given key",
|
|
|
|
|
"Displays the value of KEY in the database, or `not found' if KEY is not present." ESCAPINGK);
|
|
|
|
|
helpMap["getrange"] =
|
|
|
|
|
CommandHelp("getrange <BEGINKEY> [ENDKEY] [LIMIT]",
|
|
|
|
|
"fetch key/value pairs in a range of keys",
|
|
|
|
|
"Displays up to LIMIT keys and values for keys between BEGINKEY (inclusive) and ENDKEY "
|
|
|
|
|
"(exclusive). If ENDKEY is omitted, then the range will include all keys starting with BEGINKEY. "
|
|
|
|
|
"LIMIT defaults to 25 if omitted." ESCAPINGK);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["getrangekeys"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"getrangekeys <BEGINKEY> [ENDKEY] [LIMIT]",
|
|
|
|
|
"fetch keys in a range of keys",
|
|
|
|
|
"Displays up to LIMIT keys for keys between BEGINKEY (inclusive) and ENDKEY (exclusive). If ENDKEY is omitted, "
|
|
|
|
|
"then the range will include all keys starting with BEGINKEY. LIMIT defaults to 25 if omitted." ESCAPINGK);
|
2020-03-31 08:10:00 +08:00
|
|
|
|
helpMap["getversion"] =
|
2021-03-11 02:06:03 +08:00
|
|
|
|
CommandHelp("getversion",
|
|
|
|
|
"Fetch the current read version",
|
2020-03-31 08:10:00 +08:00
|
|
|
|
"Displays the current read version of the database or currently running transaction.");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
helpMap["reset"] =
|
|
|
|
|
CommandHelp("reset",
|
|
|
|
|
"reset the current transaction",
|
|
|
|
|
"Any sets or clears executed after the start of the active transaction will be discarded.");
|
|
|
|
|
helpMap["rollback"] = CommandHelp("rollback",
|
|
|
|
|
"rolls back the current transaction",
|
|
|
|
|
"The active transaction will be discarded, including any sets or clears executed "
|
|
|
|
|
"since the transaction was started.");
|
|
|
|
|
helpMap["set"] = CommandHelp("set <KEY> <VALUE>",
|
|
|
|
|
"set a value for a given key",
|
|
|
|
|
"If KEY is not already present in the database, it will be created." ESCAPINGKV);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["option"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"option <STATE> <OPTION> <ARG>",
|
|
|
|
|
"enables or disables an option",
|
|
|
|
|
"If STATE is `on', then the option OPTION will be enabled with optional parameter ARG, if required. If STATE "
|
|
|
|
|
"is `off', then OPTION will be disabled.\n\nIf there is no active transaction, then the option will be applied "
|
|
|
|
|
"to all operations as well as all subsequently created transactions (using `begin').\n\nIf there is an active "
|
|
|
|
|
"transaction (one created with `begin'), then enabled options apply only to that transaction. Options cannot "
|
|
|
|
|
"be disabled on an active transaction.\n\nCalling `option' with no parameters prints a list of all enabled "
|
|
|
|
|
"options.\n\nFor information about specific options that can be set, type `help options'.");
|
|
|
|
|
helpMap["help"] = CommandHelp("help [<topic>]", "get help about a topic or command", "");
|
|
|
|
|
helpMap["writemode"] = CommandHelp("writemode <on|off>",
|
|
|
|
|
"enables or disables sets and clears",
|
|
|
|
|
"Setting or clearing keys from the CLI is not recommended.");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
helpMap["kill"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"kill all|list|<ADDRESS...>",
|
|
|
|
|
"attempts to kill one or more processes in the cluster",
|
|
|
|
|
"If no addresses are specified, populates the list of processes which can be killed. Processes cannot be "
|
|
|
|
|
"killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known "
|
|
|
|
|
"processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is "
|
|
|
|
|
"unresponsive.\n\nFor each IP:port pair in <ADDRESS ...>, attempt to kill the specified process.");
|
2020-07-23 07:37:00 +08:00
|
|
|
|
helpMap["suspend"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"suspend <SECONDS> <ADDRESS...>",
|
|
|
|
|
"attempts to suspend one or more processes in the cluster",
|
|
|
|
|
"If no parameters are specified, populates the list of processes which can be suspended. Processes cannot be "
|
|
|
|
|
"suspended before this list has been populated.\n\nFor each IP:port pair in <ADDRESS...>, attempt to suspend "
|
|
|
|
|
"the processes for the specified SECONDS after which the process will die.");
|
|
|
|
|
helpMap["profile"] = CommandHelp("profile <client|list|flow|heap> <action> <ARGS>",
|
|
|
|
|
"namespace for all the profiling-related commands.",
|
|
|
|
|
"Different types support different actions. Run `profile` to get a list of "
|
|
|
|
|
"types, and iteratively explore the help.\n");
|
2020-08-07 00:23:31 +08:00
|
|
|
|
helpMap["cache_range"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"cache_range <set|clear> <BEGINKEY> <ENDKEY>",
|
|
|
|
|
"Mark a key range to add to or remove from storage caches.",
|
|
|
|
|
"Use the storage caches to assist in balancing hot read shards. Set the appropriate ranges when experiencing "
|
|
|
|
|
"heavy load, and clear them when they are no longer necessary.");
|
2019-08-28 04:15:30 +08:00
|
|
|
|
helpMap["lock"] = CommandHelp(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"lock",
|
|
|
|
|
"lock the database with a randomly generated lockUID",
|
|
|
|
|
"Randomly generates a lockUID, prints this lockUID, and then uses the lockUID to lock the database.");
|
2020-04-02 08:39:16 +08:00
|
|
|
|
helpMap["unlock"] =
|
2021-03-11 02:06:03 +08:00
|
|
|
|
CommandHelp("unlock <UID>",
|
|
|
|
|
"unlock the database with the provided lockUID",
|
2020-04-02 08:39:16 +08:00
|
|
|
|
"Unlocks the database with the provided lockUID. This is a potentially dangerous operation, so the "
|
|
|
|
|
"user will be asked to enter a passphrase to confirm their intent.");
|
2020-12-04 06:49:23 +08:00
|
|
|
|
helpMap["triggerddteaminfolog"] =
|
2021-03-11 02:06:03 +08:00
|
|
|
|
CommandHelp("triggerddteaminfolog",
|
|
|
|
|
"trigger the data distributor teams logging",
|
2020-11-13 08:27:55 +08:00
|
|
|
|
"Trigger the data distributor to log detailed information about its teams.");
|
2021-06-08 03:54:24 +08:00
|
|
|
|
helpMap["tssq"] =
|
|
|
|
|
CommandHelp("tssq start|stop <StorageUID>",
|
|
|
|
|
"start/stop tss quarantine",
|
|
|
|
|
"Toggles Quarantine mode for a Testing Storage Server. Quarantine will happen automatically if the "
|
|
|
|
|
"TSS is detected to have incorrect data, but can also be initiated manually. You can also remove a "
|
|
|
|
|
"TSS from quarantine once your investigation is finished, which will destroy the TSS process.");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
hiddenCommands.insert("expensive_data_check");
|
2017-07-29 09:12:04 +08:00
|
|
|
|
hiddenCommands.insert("datadistribution");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Prints fdbcli package name/version, the source (git) version, and the wire
// protocol version (hex) to stdout.
void printVersion() {
	printf("FoundationDB CLI " FDB_VT_PACKAGE_NAME " (v" FDB_VT_VERSION ")\n");
	printf("source version %s\n", getSourceVersion());
	printf("protocol %" PRIx64 "\n", currentProtocolVersion.version());
}
|
|
|
|
|
|
2020-09-11 04:54:33 +08:00
|
|
|
|
// Prints the JSON build-information blob (compiler, flags, etc.) to stdout.
void printBuildInformation() {
	printf("%s", jsonBuildInformation().c_str());
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
void printHelpOverview() {
|
|
|
|
|
printf("\nList of commands:\n\n");
|
2020-12-27 13:46:20 +08:00
|
|
|
|
for (const auto& [command, help] : helpMap) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (help.short_desc.size())
|
|
|
|
|
printf(" %s:\n %s\n", command.c_str(), help.short_desc.c_str());
|
2020-12-27 13:46:20 +08:00
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("\nFor information on a specific command, type `help <command>'.");
|
|
|
|
|
printf("\nFor information on escaping keys and values, type `help escaping'.");
|
|
|
|
|
printf("\nFor information on available options, type `help options'.\n\n");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Prints the full help entry (usage, capitalized short description, wrapped
// long description) for a single command, or an "I don't know" message when
// the command is unknown or undocumented.
void printHelp(StringRef command) {
    const auto entry = helpMap.find(command.toString());
    if (entry == helpMap.end() || entry->second.short_desc.empty()) {
        printf("I don't know anything about `%s'\n", formatStringRef(command).c_str());
        return;
    }

    const CommandHelp& help = entry->second;
    printf("\n%s\n\n", help.usage.c_str());
    // Capitalize the first character of the short description for display.
    const char* shortDesc = help.short_desc.c_str();
    printf("%c%s.\n", toupper(shortDesc[0]), shortDesc + 1);
    if (!help.long_desc.empty()) {
        printf("\n");
        printAtCol(help.long_desc.c_str(), 80);
    }
    printf("\n");
}
|
|
|
|
|
|
|
|
|
|
// Builds a human-readable list of coordinator addresses and their
// reachability, one per line. Falls back to an error line if the expected
// fields are missing from the status document.
std::string getCoordinatorsInfoString(StatusObjectReader statusObj) {
    std::string result;
    try {
        StatusArray coordinators = statusObj["client.coordinators.coordinators"].get_array();
        for (StatusObjectReader coordinator : coordinators) {
            const char* reachability = coordinator["reachable"].get_bool() ? "reachable" : "unreachable";
            result += format("\n %s (%s)", coordinator["address"].get_str().c_str(), reachability);
        }
    } catch (std::runtime_error&) {
        // Any missing/mistyped field lands here; report that the list is unavailable.
        result = "\n Unable to retrieve list of coordination servers";
    }
    return result;
}
|
|
|
|
|
|
|
|
|
|
// Formats the epoch-seconds value stored under `key' in the status document
// as a local "MM/DD/YY HH:MM:SS" string; returns "" when the key is absent.
std::string getDateInfoString(StatusObjectReader statusObj, std::string key) {
    if (!statusObj.has(key)) {
        return "";
    }
    const time_t epochSeconds = statusObj.last().get_int64();
    struct tm* localTime = localtime(&epochSeconds);
    char formatted[128];
    strftime(formatted, 128, "%m/%d/%y %H:%M:%S", localTime);
    return std::string(formatted);
}
|
|
|
|
|
|
|
|
|
|
// Looks up the network address of the process hosting the role whose id
// begins with `serverID', by scanning every process's "roles" array in the
// status "processes" map. Returns "unknown" when serverID is empty or no
// matching role/address is found.
std::string getProcessAddressByServerID(StatusObjectReader processesMap, std::string serverID) {
    if (serverID.empty())
        return "unknown";

    // const-ref iteration: each map entry is a (string, JSON value) pair that
    // is expensive to copy per iteration.
    for (const auto& proc : processesMap.obj()) {
        try {
            StatusArray rolesArray = proc.second.get_obj()["roles"].get_array();
            for (StatusObjectReader role : rolesArray) {
                if (role["id"].get_str().find(serverID) == 0) {
                    // If this next line throws, then we found the serverID but the role has no address, so the role is
                    // skipped.
                    return proc.second.get_obj()["address"].get_str();
                }
            }
        } catch (std::exception&) {
            // If an entry in the process map is badly formed then something will throw. Since we are
            // looking for a positive match, just ignore any read exceptions and move on to the next proc
        }
    }
    return "unknown";
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Reads a rate metric ("<first>.<second>") from either the "transactions" or
// "operations" sub-document of the workload status and renders it as an
// integer Hz string; returns "unknown" when `unknown' is set or the metric is
// missing.
std::string getWorkloadRates(StatusObjectReader statusObj,
                             bool unknown,
                             std::string first,
                             std::string second,
                             bool transactionSection = false) {
    // Re-point statusObj at either the transactions sub-doc or the operations
    // sub-doc depending on the transactionSection flag.
    const char* section = transactionSection ? "transactions" : "operations";
    if (!statusObj.get(section, statusObj))
        return "unknown";

    const std::string metricPath = first + "." + second;
    double rate;
    if (!unknown && statusObj.get(metricPath, rate)) {
        return format("%d Hz", (int)round(rate));
    }
    return "unknown";
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Collects the tags with a running backup/DR under "layers.<context>.tags"
// into tagMap, keyed by tag name and valued with the tag's mutation stream id
// (or "" if the id is absent).
void getBackupDRTags(StatusObjectReader& statusObjCluster,
                     const char* context,
                     std::map<std::string, std::string>& tagMap) {
    std::string path = format("layers.%s.tags", context);
    StatusObjectReader tags;
    if (statusObjCluster.tryGet(path, tags)) {
        // const-ref iteration: avoids copying each (name, JSON value) entry.
        for (const auto& itr : tags.obj()) {
            JSONDoc tag(itr.second);
            bool running = false;
            tag.tryGet("running_backup", running);
            if (running) {
                std::string uid;
                if (tag.tryGet("mutation_stream_id", uid)) {
                    tagMap[itr.first] = uid;
                } else {
                    tagMap[itr.first] = "";
                }
            }
        }
    }
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
std::string logBackupDR(const char* context, std::map<std::string, std::string> const& tagMap) {
|
2017-11-04 05:02:03 +08:00
|
|
|
|
std::string outputString = "";
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tagMap.size() > 0) {
|
2017-11-04 05:02:03 +08:00
|
|
|
|
outputString += format("\n\n%s:", context);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto itr : tagMap) {
|
2017-11-07 01:20:31 +08:00
|
|
|
|
outputString += format("\n %-22s", itr.first.c_str());
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (itr.second.size() > 0) {
|
2017-11-07 01:20:31 +08:00
|
|
|
|
outputString += format(" - %s", itr.second.c_str());
|
2017-11-04 05:02:03 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return outputString;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-07 09:04:38 +08:00
|
|
|
|
// Counts the machines in the cluster status whose "excluded" flag is present
// and false. Machines missing the flag are not counted (matches existing
// behavior — assumes status always reports "excluded"; TODO confirm).
int getNumofNonExcludedMachines(StatusObjectReader statusObjCluster) {
    StatusObjectReader machineMap;
    int numOfNonExcludedMachines = 0;
    if (statusObjCluster.get("machines", machineMap)) {
        // const-ref iteration: avoids copying each (id, JSON value) map entry.
        for (const auto& mach : machineMap.obj()) {
            StatusObjectReader machine(mach.second);
            if (machine.has("excluded") && !machine.last().get_bool())
                numOfNonExcludedMachines++;
        }
    }
    return numOfNonExcludedMachines;
}
|
|
|
|
|
|
|
|
|
|
// Returns (number of non-excluded processes, number of distinct zones those
// processes occupy). A process counts as excluded only when its "excluded"
// flag is present and true.
std::pair<int, int> getNumOfNonExcludedProcessAndZones(StatusObjectReader statusObjCluster) {
    StatusObjectReader processesMap;
    std::set<std::string> zones;
    int numOfNonExcludedProcesses = 0;
    if (statusObjCluster.get("processes", processesMap)) {
        // const-ref iteration: avoids copying each (id, JSON value) map entry.
        for (const auto& proc : processesMap.obj()) {
            StatusObjectReader process(proc.second);
            if (process.has("excluded") && process.last().get_bool())
                continue;
            numOfNonExcludedProcesses++;
            std::string zoneId;
            if (process.get("locality.zoneid", zoneId)) {
                zones.insert(zoneId);
            }
        }
    }
    // Explicit cast: set::size() is size_t, the pair carries int.
    return { numOfNonExcludedProcesses, static_cast<int>(zones.size()) };
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void printStatus(StatusObjectReader statusObj,
|
|
|
|
|
StatusClient::StatusLevel level,
|
|
|
|
|
bool displayDatabaseAvailable = true,
|
|
|
|
|
bool hideErrorMessages = false) {
|
2017-11-10 03:20:35 +08:00
|
|
|
|
if (FlowTransport::transport().incompatibleOutgoingConnectionsPresent()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(
|
|
|
|
|
stderr,
|
|
|
|
|
"WARNING: One or more of the processes in the cluster is incompatible with this version of fdbcli.\n\n");
|
2017-11-10 03:20:35 +08:00
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
bool printedCoordinators = false;
|
|
|
|
|
|
|
|
|
|
// status or status details
|
|
|
|
|
if (level == StatusClient::NORMAL || level == StatusClient::DETAILED) {
|
|
|
|
|
|
|
|
|
|
StatusObjectReader statusObjClient;
|
|
|
|
|
statusObj.get("client", statusObjClient);
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// The way the output string is assembled is to add new line character before addition to the string rather
|
|
|
|
|
// than after
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string outputString = "";
|
|
|
|
|
std::string clusterFilePath;
|
|
|
|
|
if (statusObjClient.get("cluster_file.path", clusterFilePath))
|
|
|
|
|
outputString = format("Using cluster file `%s'.\n", clusterFilePath.c_str());
|
|
|
|
|
else
|
|
|
|
|
outputString = "Using unknown cluster file.\n";
|
|
|
|
|
|
|
|
|
|
StatusObjectReader statusObjCoordinators;
|
|
|
|
|
StatusArray coordinatorsArr;
|
|
|
|
|
|
|
|
|
|
if (statusObjClient.get("coordinators", statusObjCoordinators)) {
|
|
|
|
|
// Look for a second "coordinators", under the first one.
|
|
|
|
|
if (statusObjCoordinators.has("coordinators"))
|
|
|
|
|
coordinatorsArr = statusObjCoordinators.last().get_array();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check if any coordination servers are unreachable
|
|
|
|
|
bool quorum_reachable;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjCoordinators.get("quorum_reachable", quorum_reachable) && !quorum_reachable) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\nCould not communicate with a quorum of coordination servers:";
|
|
|
|
|
outputString += getCoordinatorsInfoString(statusObj);
|
|
|
|
|
|
|
|
|
|
printf("%s\n", outputString.c_str());
|
|
|
|
|
return;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
|
|
|
|
for (StatusObjectReader coor : coordinatorsArr) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
bool reachable;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (coor.get("reachable", reachable) && !reachable) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\nCould not communicate with all of the coordination servers."
|
2021-03-11 02:06:03 +08:00
|
|
|
|
"\n The database will remain operational as long as we"
|
|
|
|
|
"\n can connect to a quorum of servers, however the fault"
|
|
|
|
|
"\n tolerance of the system is reduced as long as the"
|
|
|
|
|
"\n servers remain disconnected.\n";
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += getCoordinatorsInfoString(statusObj);
|
|
|
|
|
outputString += "\n";
|
|
|
|
|
printedCoordinators = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// print any client messages
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjClient.has("messages")) {
|
|
|
|
|
for (StatusObjectReader message : statusObjClient.last().get_array()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string desc;
|
|
|
|
|
if (message.get("description", desc))
|
|
|
|
|
outputString += "\n" + lineWrap(desc.c_str(), 80);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool fatalRecoveryState = false;
|
|
|
|
|
StatusObjectReader statusObjCluster;
|
|
|
|
|
try {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObj.get("cluster", statusObjCluster)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
StatusObjectReader recoveryState;
|
|
|
|
|
if (statusObjCluster.get("recovery_state", recoveryState)) {
|
|
|
|
|
std::string name;
|
|
|
|
|
std::string description;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (recoveryState.get("name", name) && recoveryState.get("description", description) &&
|
|
|
|
|
name != "accepting_commits" && name != "all_logs_recruited" &&
|
|
|
|
|
name != "storage_recovered" && name != "fully_recovered") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
fatalRecoveryState = true;
|
|
|
|
|
|
|
|
|
|
if (name == "recruiting_transaction_servers") {
|
2020-09-11 08:44:15 +08:00
|
|
|
|
description +=
|
|
|
|
|
format("\nNeed at least %d log servers across unique zones, %d commit proxies, "
|
|
|
|
|
"%d GRV proxies and %d resolvers.",
|
|
|
|
|
recoveryState["required_logs"].get_int(),
|
|
|
|
|
recoveryState["required_commit_proxies"].get_int(),
|
|
|
|
|
recoveryState["required_grv_proxies"].get_int(),
|
|
|
|
|
recoveryState["required_resolvers"].get_int());
|
2019-02-07 09:04:38 +08:00
|
|
|
|
if (statusObjCluster.has("machines") && statusObjCluster.has("processes")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
auto numOfNonExcludedProcessesAndZones =
|
|
|
|
|
getNumOfNonExcludedProcessAndZones(statusObjCluster);
|
|
|
|
|
description +=
|
|
|
|
|
format("\nHave %d non-excluded processes on %d machines across %d zones.",
|
|
|
|
|
numOfNonExcludedProcessesAndZones.first,
|
|
|
|
|
getNumofNonExcludedMachines(statusObjCluster),
|
|
|
|
|
numOfNonExcludedProcessesAndZones.second);
|
2019-02-07 09:04:38 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (name == "locking_old_transaction_servers" &&
|
|
|
|
|
recoveryState["missing_logs"].get_str().size()) {
|
|
|
|
|
description += format("\nNeed one or more of the following log servers: %s",
|
|
|
|
|
recoveryState["missing_logs"].get_str().c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
description = lineWrap(description.c_str(), 80);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!printedCoordinators &&
|
|
|
|
|
(name == "reading_coordinated_state" || name == "locking_coordinated_state" ||
|
|
|
|
|
name == "configuration_never_created" || name == "writing_coordinated_state")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
description += getCoordinatorsInfoString(statusObj);
|
|
|
|
|
description += "\n";
|
|
|
|
|
printedCoordinators = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
outputString += "\n" + description;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-08-06 15:01:57 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check if cluster controllable is reachable
|
2017-05-27 05:51:34 +08:00
|
|
|
|
try {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
// print any cluster messages
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjCluster.has("messages") && statusObjCluster.last().get_array().size()) {
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
// any messages we don't want to display
|
|
|
|
|
std::set<std::string> skipMsgs = { "unreachable_process", "" };
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (fatalRecoveryState) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
skipMsgs.insert("status_incomplete");
|
|
|
|
|
skipMsgs.insert("unreadable_configuration");
|
|
|
|
|
skipMsgs.insert("immediate_priority_transaction_start_probe_timeout");
|
|
|
|
|
skipMsgs.insert("batch_priority_transaction_start_probe_timeout");
|
|
|
|
|
skipMsgs.insert("transaction_start_probe_timeout");
|
|
|
|
|
skipMsgs.insert("read_probe_timeout");
|
|
|
|
|
skipMsgs.insert("commit_probe_timeout");
|
|
|
|
|
}
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (StatusObjectReader msgObj : statusObjCluster.last().get_array()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string messageName;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!msgObj.get("name", messageName)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (skipMsgs.count(messageName)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (messageName == "client_issues") {
|
|
|
|
|
if (msgObj.has("issues")) {
|
|
|
|
|
for (StatusObjectReader issue : msgObj["issues"].get_array()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string issueName;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!issue.get("name", issueName)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::string description;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!issue.get("description", description)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
description = issueName;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::string countStr;
|
|
|
|
|
StatusArray addresses;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!issue.has("addresses")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
countStr = "Some client(s)";
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
addresses = issue["addresses"].get_array();
|
|
|
|
|
countStr = format("%d client(s)", addresses.size());
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
outputString +=
|
|
|
|
|
format("\n%s reported: %s\n", countStr.c_str(), description.c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (level == StatusClient::StatusLevel::DETAILED) {
|
|
|
|
|
for (int i = 0; i < addresses.size() && i < 4; ++i) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += format(" %s\n", addresses[i].get_str().c_str());
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (addresses.size() > 4) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += " ...\n";
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (msgObj.has("description"))
|
|
|
|
|
outputString += "\n" + lineWrap(msgObj.last().get_str().c_str(), 80);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (fatalRecoveryState) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("%s", outputString.c_str());
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
StatusObjectReader statusObjConfig;
|
|
|
|
|
StatusArray excludedServersArr;
|
2020-04-02 06:13:04 +08:00
|
|
|
|
Optional<std::string> activePrimaryDC;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2020-04-02 06:13:04 +08:00
|
|
|
|
if (statusObjCluster.has("active_primary_dc")) {
|
|
|
|
|
activePrimaryDC = statusObjCluster["active_primary_dc"].get_str();
|
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (statusObjCluster.get("configuration", statusObjConfig)) {
|
|
|
|
|
if (statusObjConfig.has("excluded_servers"))
|
|
|
|
|
excludedServersArr = statusObjConfig.last().get_array();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If there is a configuration message then there is no configuration information to display
|
|
|
|
|
outputString += "\nConfiguration:";
|
|
|
|
|
std::string outputStringCache = outputString;
|
2019-03-17 13:48:24 +08:00
|
|
|
|
bool isOldMemory = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
// Configuration section
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// FIXME: Should we suppress this if there are cluster messages implying that the database has no
|
|
|
|
|
// configuration?
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
outputString += "\n Redundancy mode - ";
|
|
|
|
|
std::string strVal;
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjConfig.get("redundancy_mode", strVal)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += strVal;
|
|
|
|
|
} else
|
|
|
|
|
outputString += "unknown";
|
|
|
|
|
|
|
|
|
|
outputString += "\n Storage engine - ";
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjConfig.get("storage_engine", strVal)) {
|
|
|
|
|
if (strVal == "memory-1") {
|
2019-03-17 13:48:24 +08:00
|
|
|
|
isOldMemory = true;
|
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += strVal;
|
|
|
|
|
} else
|
|
|
|
|
outputString += "unknown";
|
|
|
|
|
|
|
|
|
|
int intVal;
|
|
|
|
|
outputString += "\n Coordinators - ";
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjConfig.get("coordinators_count", intVal)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += std::to_string(intVal);
|
|
|
|
|
} else
|
|
|
|
|
outputString += "unknown";
|
|
|
|
|
|
|
|
|
|
if (excludedServersArr.size()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
outputString += format("\n Exclusions - %d (type `exclude' for details)",
|
|
|
|
|
excludedServersArr.size());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2020-09-11 08:44:15 +08:00
|
|
|
|
if (statusObjConfig.get("commit_proxies", intVal))
|
|
|
|
|
outputString += format("\n Desired Commit Proxies - %d", intVal);
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2020-08-06 15:01:57 +08:00
|
|
|
|
if (statusObjConfig.get("grv_proxies", intVal))
|
|
|
|
|
outputString += format("\n Desired GRV Proxies - %d", intVal);
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (statusObjConfig.get("resolvers", intVal))
|
|
|
|
|
outputString += format("\n Desired Resolvers - %d", intVal);
|
|
|
|
|
|
|
|
|
|
if (statusObjConfig.get("logs", intVal))
|
|
|
|
|
outputString += format("\n Desired Logs - %d", intVal);
|
2018-06-22 15:04:00 +08:00
|
|
|
|
|
|
|
|
|
if (statusObjConfig.get("remote_logs", intVal))
|
|
|
|
|
outputString += format("\n Desired Remote Logs - %d", intVal);
|
|
|
|
|
|
|
|
|
|
if (statusObjConfig.get("log_routers", intVal))
|
|
|
|
|
outputString += format("\n Desired Log Routers - %d", intVal);
|
2020-03-26 07:34:24 +08:00
|
|
|
|
|
2021-03-06 03:28:15 +08:00
|
|
|
|
if (statusObjConfig.get("tss_count", intVal) && intVal > 0) {
|
|
|
|
|
int activeTss = 0;
|
|
|
|
|
if (statusObjCluster.has("active_tss_count")) {
|
|
|
|
|
statusObjCluster.get("active_tss_count", activeTss);
|
|
|
|
|
}
|
|
|
|
|
outputString += format("\n TSS - %d/%d", activeTss, intVal);
|
|
|
|
|
|
|
|
|
|
if (statusObjConfig.get("tss_storage_engine", strVal))
|
|
|
|
|
outputString += format("\n TSS Storage Engine - %s", strVal.c_str());
|
|
|
|
|
}
|
|
|
|
|
|
2020-03-26 07:34:24 +08:00
|
|
|
|
outputString += "\n Usable Regions - ";
|
|
|
|
|
if (statusObjConfig.get("usable_regions", intVal)) {
|
|
|
|
|
outputString += std::to_string(intVal);
|
|
|
|
|
} else {
|
|
|
|
|
outputString += "unknown";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
StatusArray regions;
|
|
|
|
|
if (statusObjConfig.has("regions")) {
|
|
|
|
|
outputString += "\n Regions: ";
|
|
|
|
|
regions = statusObjConfig["regions"].get_array();
|
|
|
|
|
for (StatusObjectReader region : regions) {
|
2020-10-01 08:42:22 +08:00
|
|
|
|
bool isPrimary = false;
|
|
|
|
|
std::vector<std::string> regionSatelliteDCs;
|
|
|
|
|
std::string regionDC;
|
2020-03-31 04:55:59 +08:00
|
|
|
|
for (StatusObjectReader dc : region["datacenters"].get_array()) {
|
|
|
|
|
if (!dc.has("satellite")) {
|
|
|
|
|
regionDC = dc["id"].get_str();
|
|
|
|
|
if (activePrimaryDC.present() && dc["id"].get_str() == activePrimaryDC.get()) {
|
|
|
|
|
isPrimary = true;
|
|
|
|
|
}
|
|
|
|
|
} else if (dc["satellite"].get_int() == 1) {
|
|
|
|
|
regionSatelliteDCs.push_back(dc["id"].get_str());
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-04-02 06:13:04 +08:00
|
|
|
|
if (activePrimaryDC.present()) {
|
|
|
|
|
if (isPrimary) {
|
|
|
|
|
outputString += "\n Primary -";
|
|
|
|
|
} else {
|
|
|
|
|
outputString += "\n Remote -";
|
|
|
|
|
}
|
2020-03-31 04:55:59 +08:00
|
|
|
|
} else {
|
2020-04-02 06:13:04 +08:00
|
|
|
|
outputString += "\n Region -";
|
2020-03-31 04:55:59 +08:00
|
|
|
|
}
|
|
|
|
|
outputString += format("\n Datacenter - %s", regionDC.c_str());
|
|
|
|
|
if (regionSatelliteDCs.size() > 0) {
|
|
|
|
|
outputString += "\n Satellite datacenters - ";
|
|
|
|
|
for (int i = 0; i < regionSatelliteDCs.size(); i++) {
|
|
|
|
|
if (i != regionSatelliteDCs.size() - 1) {
|
|
|
|
|
outputString += format("%s, ", regionSatelliteDCs[i].c_str());
|
|
|
|
|
} else {
|
|
|
|
|
outputString += format("%s", regionSatelliteDCs[i].c_str());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
isPrimary = false;
|
2020-03-26 07:34:24 +08:00
|
|
|
|
if (region.get("satellite_redundancy_mode", strVal)) {
|
2020-03-28 06:45:51 +08:00
|
|
|
|
outputString += format("\n Satellite Redundancy Mode - %s", strVal.c_str());
|
2020-03-26 07:34:24 +08:00
|
|
|
|
}
|
|
|
|
|
if (region.get("satellite_anti_quorum", intVal)) {
|
2020-03-28 06:45:51 +08:00
|
|
|
|
outputString += format("\n Satellite Anti Quorum - %d", intVal);
|
2020-03-26 07:34:24 +08:00
|
|
|
|
}
|
|
|
|
|
if (region.get("satellite_logs", intVal)) {
|
2020-03-28 06:45:51 +08:00
|
|
|
|
outputString += format("\n Satellite Logs - %d", intVal);
|
2020-03-26 07:34:24 +08:00
|
|
|
|
}
|
|
|
|
|
if (region.get("satellite_log_policy", strVal)) {
|
2020-03-28 06:45:51 +08:00
|
|
|
|
outputString += format("\n Satellite Log Policy - %s", strVal.c_str());
|
2020-03-26 07:34:24 +08:00
|
|
|
|
}
|
|
|
|
|
if (region.get("satellite_log_replicas", intVal)) {
|
2020-03-28 06:45:51 +08:00
|
|
|
|
outputString += format("\n Satellite Log Replicas - %d", intVal);
|
2020-03-26 07:34:24 +08:00
|
|
|
|
}
|
|
|
|
|
if (region.get("satellite_usable_dcs", intVal)) {
|
2020-03-28 06:45:51 +08:00
|
|
|
|
outputString += format("\n Satellite Usable DCs - %d", intVal);
|
2020-03-26 07:34:24 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString = outputStringCache;
|
|
|
|
|
outputString += "\n Unable to retrieve configuration status";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Cluster section
|
|
|
|
|
outputString += "\n\nCluster:";
|
|
|
|
|
StatusObjectReader processesMap;
|
|
|
|
|
StatusObjectReader machinesMap;
|
|
|
|
|
|
|
|
|
|
outputStringCache = outputString;
|
2019-07-31 03:24:13 +08:00
|
|
|
|
|
|
|
|
|
bool machinesAreZones = true;
|
|
|
|
|
std::map<std::string, int> zones;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
outputString += "\n FoundationDB processes - ";
|
|
|
|
|
if (statusObjCluster.get("processes", processesMap)) {
|
|
|
|
|
|
|
|
|
|
outputString += format("%d", processesMap.obj().size());
|
|
|
|
|
|
|
|
|
|
int errors = 0;
|
|
|
|
|
int processExclusions = 0;
|
|
|
|
|
for (auto p : processesMap.obj()) {
|
|
|
|
|
StatusObjectReader process(p.second);
|
2019-07-31 03:24:13 +08:00
|
|
|
|
bool excluded = process.has("excluded") && process.last().get_bool();
|
|
|
|
|
if (excluded) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
processExclusions++;
|
2019-07-31 03:24:13 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (process.has("messages") && process.last().get_array().size()) {
|
2019-07-31 03:24:13 +08:00
|
|
|
|
errors++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::string zoneId;
|
|
|
|
|
if (process.get("locality.zoneid", zoneId)) {
|
|
|
|
|
std::string machineId;
|
|
|
|
|
if (!process.get("locality.machineid", machineId) || machineId != zoneId) {
|
|
|
|
|
machinesAreZones = false;
|
|
|
|
|
}
|
|
|
|
|
int& nonExcluded = zones[zoneId];
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!excluded) {
|
2019-07-31 03:24:13 +08:00
|
|
|
|
nonExcluded = 1;
|
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (errors > 0 || processExclusions) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += format(" (less %d excluded; %d with errors)", processExclusions, errors);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} else
|
|
|
|
|
outputString += "unknown";
|
|
|
|
|
|
2019-07-31 03:24:13 +08:00
|
|
|
|
if (zones.size() > 0) {
|
|
|
|
|
outputString += format("\n Zones - %d", zones.size());
|
|
|
|
|
int zoneExclusions = 0;
|
|
|
|
|
for (auto itr : zones) {
|
|
|
|
|
if (itr.second == 0) {
|
|
|
|
|
++zoneExclusions;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (zoneExclusions > 0) {
|
|
|
|
|
outputString += format(" (less %d excluded)", zoneExclusions);
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
outputString += "\n Zones - unknown";
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n Machines - ";
|
|
|
|
|
if (statusObjCluster.get("machines", machinesMap)) {
|
|
|
|
|
outputString += format("%d", machinesMap.obj().size());
|
|
|
|
|
|
|
|
|
|
int machineExclusions = 0;
|
|
|
|
|
for (auto mach : machinesMap.obj()) {
|
|
|
|
|
StatusObjectReader machine(mach.second);
|
|
|
|
|
if (machine.has("excluded") && machine.last().get_bool())
|
|
|
|
|
machineExclusions++;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (machineExclusions) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += format(" (less %d excluded)", machineExclusions);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int64_t minMemoryAvailable = std::numeric_limits<int64_t>::max();
|
|
|
|
|
for (auto proc : processesMap.obj()) {
|
|
|
|
|
StatusObjectReader process(proc.second);
|
|
|
|
|
int64_t availBytes;
|
|
|
|
|
if (process.get("memory.available_bytes", availBytes)) {
|
|
|
|
|
minMemoryAvailable = std::min(minMemoryAvailable, availBytes);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (minMemoryAvailable < std::numeric_limits<int64_t>::max()) {
|
|
|
|
|
double worstServerGb = minMemoryAvailable / (1024.0 * 1024 * 1024);
|
|
|
|
|
outputString += "\n Memory availability - ";
|
|
|
|
|
outputString += format("%.1f GB per process on machine with least available", worstServerGb);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
outputString += minMemoryAvailable < 4294967296
|
|
|
|
|
? "\n >>>>> (WARNING: 4.0 GB recommended) <<<<<"
|
|
|
|
|
: "";
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
double retransCount = 0;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto mach : machinesMap.obj()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
StatusObjectReader machine(mach.second);
|
|
|
|
|
double hz;
|
|
|
|
|
if (machine.get("network.tcp_segments_retransmitted.hz", hz))
|
|
|
|
|
retransCount += hz;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (retransCount > 0) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += format("\n Retransmissions rate - %d Hz", (int)round(retransCount));
|
|
|
|
|
}
|
|
|
|
|
} else
|
|
|
|
|
outputString += "\n Machines - unknown";
|
|
|
|
|
|
|
|
|
|
StatusObjectReader faultTolerance;
|
|
|
|
|
if (statusObjCluster.get("fault_tolerance", faultTolerance)) {
|
|
|
|
|
int availLoss, dataLoss;
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (faultTolerance.get("max_zone_failures_without_losing_availability", availLoss) &&
|
|
|
|
|
faultTolerance.get("max_zone_failures_without_losing_data", dataLoss)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
outputString += "\n Fault Tolerance - ";
|
|
|
|
|
|
|
|
|
|
int minLoss = std::min(availLoss, dataLoss);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
const char* faultDomain = machinesAreZones ? "machine" : "zone";
|
2020-09-10 02:54:58 +08:00
|
|
|
|
outputString += format("%d %ss", minLoss, faultDomain);
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (dataLoss > availLoss) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += format(" (%d without data loss)", dataLoss);
|
|
|
|
|
}
|
2020-09-10 02:54:58 +08:00
|
|
|
|
|
|
|
|
|
if (dataLoss == -1) {
|
2020-09-10 13:34:36 +08:00
|
|
|
|
ASSERT_WE_THINK(availLoss == -1);
|
|
|
|
|
outputString += format(
|
|
|
|
|
"\n\n Warning: the database may have data loss and availability loss. Please restart "
|
|
|
|
|
"following tlog interfaces, otherwise storage servers may never be able to catch "
|
|
|
|
|
"up.\n");
|
2020-09-10 02:54:58 +08:00
|
|
|
|
StatusObjectReader logs;
|
2020-09-10 13:34:36 +08:00
|
|
|
|
if (statusObjCluster.has("logs")) {
|
|
|
|
|
for (StatusObjectReader logEpoch : statusObjCluster.last().get_array()) {
|
2020-09-10 02:54:58 +08:00
|
|
|
|
bool possiblyLosingData;
|
|
|
|
|
if (logEpoch.get("possibly_losing_data", possiblyLosingData) &&
|
|
|
|
|
!possiblyLosingData) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2020-09-10 04:57:26 +08:00
|
|
|
|
// Current epoch doesn't have an end version.
|
|
|
|
|
int64_t epoch, beginVersion, endVersion = invalidVersion;
|
2020-09-10 02:54:58 +08:00
|
|
|
|
bool current;
|
|
|
|
|
logEpoch.get("epoch", epoch);
|
|
|
|
|
logEpoch.get("begin_version", beginVersion);
|
|
|
|
|
logEpoch.get("end_version", endVersion);
|
|
|
|
|
logEpoch.get("current", current);
|
2020-09-10 13:34:36 +08:00
|
|
|
|
std::string missing_log_interfaces;
|
|
|
|
|
if (logEpoch.has("log_interfaces")) {
|
|
|
|
|
for (StatusObjectReader logInterface : logEpoch.last().get_array()) {
|
|
|
|
|
bool healthy;
|
|
|
|
|
std::string address, id;
|
|
|
|
|
if (logInterface.get("healthy", healthy) && !healthy) {
|
|
|
|
|
logInterface.get("id", id);
|
|
|
|
|
logInterface.get("address", address);
|
|
|
|
|
missing_log_interfaces += format("%s,%s ", id.c_str(), address.c_str());
|
|
|
|
|
}
|
2020-09-10 02:54:58 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
2020-09-10 13:34:36 +08:00
|
|
|
|
outputString += format(
|
|
|
|
|
" %s log epoch: %ld begin: %ld end: %s, missing "
|
|
|
|
|
"log interfaces(id,address): %s\n",
|
2021-03-11 02:06:03 +08:00
|
|
|
|
current ? "Current" : "Old",
|
|
|
|
|
epoch,
|
|
|
|
|
beginVersion,
|
2020-09-10 13:34:36 +08:00
|
|
|
|
endVersion == invalidVersion ? "(unknown)" : format("%ld", endVersion).c_str(),
|
|
|
|
|
missing_log_interfaces.c_str());
|
2020-09-10 02:54:58 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::string serverTime = getDateInfoString(statusObjCluster, "cluster_controller_timestamp");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (serverTime != "") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n Server time - " + serverTime;
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString = outputStringCache;
|
|
|
|
|
outputString += "\n Unable to retrieve cluster status";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
StatusObjectReader statusObjData;
|
|
|
|
|
statusObjCluster.get("data", statusObjData);
|
|
|
|
|
|
|
|
|
|
// Data section
|
|
|
|
|
outputString += "\n\nData:";
|
|
|
|
|
outputStringCache = outputString;
|
|
|
|
|
try {
|
|
|
|
|
outputString += "\n Replication health - ";
|
|
|
|
|
|
|
|
|
|
StatusObjectReader statusObjDataState;
|
|
|
|
|
statusObjData.get("state", statusObjDataState);
|
|
|
|
|
|
|
|
|
|
std::string dataState;
|
|
|
|
|
statusObjDataState.get("name", dataState);
|
|
|
|
|
|
|
|
|
|
std::string description = "";
|
|
|
|
|
statusObjDataState.get("description", description);
|
|
|
|
|
|
|
|
|
|
bool healthy;
|
|
|
|
|
if (statusObjDataState.get("healthy", healthy) && healthy) {
|
|
|
|
|
outputString += "Healthy" + (description != "" ? " (" + description + ")" : "");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (dataState == "missing_data") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "UNHEALTHY" + (description != "" ? ": " + description : "");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (dataState == "healing") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "HEALING" + (description != "" ? ": " + description : "");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (description != "") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += description;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "unknown";
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjData.has("moving_data")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
StatusObjectReader movingData = statusObjData.last();
|
|
|
|
|
double dataInQueue, dataInFlight;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (movingData.get("in_queue_bytes", dataInQueue) &&
|
|
|
|
|
movingData.get("in_flight_bytes", dataInFlight))
|
|
|
|
|
outputString += format("\n Moving data - %.3f GB",
|
|
|
|
|
((double)dataInQueue + (double)dataInFlight) / 1e9);
|
|
|
|
|
} else if (dataState == "initializing") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n Moving data - unknown (initializing)";
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n Moving data - unknown";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
outputString += "\n Sum of key-value sizes - ";
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjData.has("total_kv_size_bytes")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
double totalDBBytes = statusObjData.last().get_int64();
|
|
|
|
|
|
|
|
|
|
if (totalDBBytes >= 1e12)
|
|
|
|
|
outputString += format("%.3f TB", (totalDBBytes / 1e12));
|
|
|
|
|
|
|
|
|
|
else if (totalDBBytes >= 1e9)
|
|
|
|
|
outputString += format("%.3f GB", (totalDBBytes / 1e9));
|
|
|
|
|
|
|
|
|
|
else
|
|
|
|
|
// no decimal points for MB
|
|
|
|
|
outputString += format("%d MB", (int)round(totalDBBytes / 1e6));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "unknown";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
outputString += "\n Disk space used - ";
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (statusObjData.has("total_disk_used_bytes")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
double totalDiskUsed = statusObjData.last().get_int64();
|
|
|
|
|
|
|
|
|
|
if (totalDiskUsed >= 1e12)
|
|
|
|
|
outputString += format("%.3f TB", (totalDiskUsed / 1e12));
|
|
|
|
|
|
|
|
|
|
else if (totalDiskUsed >= 1e9)
|
|
|
|
|
outputString += format("%.3f GB", (totalDiskUsed / 1e9));
|
|
|
|
|
|
|
|
|
|
else
|
|
|
|
|
// no decimal points for MB
|
|
|
|
|
outputString += format("%d MB", (int)round(totalDiskUsed / 1e6));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "unknown";
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString = outputStringCache;
|
|
|
|
|
outputString += "\n Unable to retrieve data status";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Operating space section
|
|
|
|
|
outputString += "\n\nOperating space:";
|
|
|
|
|
std::string operatingSpaceString = "";
|
|
|
|
|
try {
|
|
|
|
|
int64_t val;
|
|
|
|
|
if (statusObjData.get("least_operating_space_bytes_storage_server", val))
|
2021-03-11 02:06:03 +08:00
|
|
|
|
operatingSpaceString += format("\n Storage server - %.1f GB free on most full server",
|
|
|
|
|
std::max(val / 1e9, 0.0));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
if (statusObjData.get("least_operating_space_bytes_log_server", val))
|
2021-03-11 02:06:03 +08:00
|
|
|
|
operatingSpaceString += format("\n Log server - %.1f GB free on most full server",
|
|
|
|
|
std::max(val / 1e9, 0.0));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
operatingSpaceString = "";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (operatingSpaceString.empty()) {
|
|
|
|
|
operatingSpaceString += "\n Unable to retrieve operating space status";
|
|
|
|
|
}
|
|
|
|
|
outputString += operatingSpaceString;
|
|
|
|
|
|
|
|
|
|
// Workload section
|
|
|
|
|
outputString += "\n\nWorkload:";
|
|
|
|
|
outputStringCache = outputString;
|
2019-03-17 13:48:24 +08:00
|
|
|
|
bool foundLogAndStorage = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
// Determine which rates are unknown
|
|
|
|
|
StatusObjectReader statusObjWorkload;
|
|
|
|
|
statusObjCluster.get("workload", statusObjWorkload);
|
|
|
|
|
|
|
|
|
|
std::string performanceLimited = "";
|
|
|
|
|
bool unknownMCT = false;
|
|
|
|
|
bool unknownRP = false;
|
|
|
|
|
|
2017-05-27 05:51:34 +08:00
|
|
|
|
// Print performance limit details if known.
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
StatusObjectReader limit = statusObjCluster["qos.performance_limited_by"];
|
|
|
|
|
std::string name = limit["name"].get_str();
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (name != "workload") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string desc = limit["description"].get_str();
|
|
|
|
|
std::string serverID;
|
|
|
|
|
limit.get("reason_server_id", serverID);
|
|
|
|
|
std::string procAddr = getProcessAddressByServerID(processesMap, serverID);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
performanceLimited = format("\n Performance limited by %s: %s",
|
|
|
|
|
(procAddr == "unknown")
|
|
|
|
|
? ("server" + (serverID == "" ? "" : (" " + serverID))).c_str()
|
|
|
|
|
: "process",
|
|
|
|
|
desc.c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (procAddr != "unknown")
|
|
|
|
|
performanceLimited += format("\n Most limiting process: %s", procAddr.c_str());
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::exception&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
// If anything here throws (such as for an incompatible type) ignore it.
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// display the known rates
|
|
|
|
|
outputString += "\n Read rate - ";
|
|
|
|
|
outputString += getWorkloadRates(statusObjWorkload, unknownRP, "reads", "hz");
|
|
|
|
|
|
|
|
|
|
outputString += "\n Write rate - ";
|
|
|
|
|
outputString += getWorkloadRates(statusObjWorkload, unknownMCT, "writes", "hz");
|
|
|
|
|
|
|
|
|
|
outputString += "\n Transactions started - ";
|
|
|
|
|
outputString += getWorkloadRates(statusObjWorkload, unknownMCT, "started", "hz", true);
|
|
|
|
|
|
|
|
|
|
outputString += "\n Transactions committed - ";
|
|
|
|
|
outputString += getWorkloadRates(statusObjWorkload, unknownMCT, "committed", "hz", true);
|
|
|
|
|
|
|
|
|
|
outputString += "\n Conflict rate - ";
|
|
|
|
|
outputString += getWorkloadRates(statusObjWorkload, unknownMCT, "conflicted", "hz", true);
|
|
|
|
|
|
|
|
|
|
outputString += unknownRP ? "" : performanceLimited;
|
|
|
|
|
|
|
|
|
|
// display any process messages
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// FIXME: Above comment is not what this code block does, it actually just looks for a specific message
|
|
|
|
|
// in the process map, *by description*, and adds process addresses that have it to a vector. Either
|
|
|
|
|
// change the comment or the code.
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::vector<std::string> messagesAddrs;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto proc : processesMap.obj()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
StatusObjectReader process(proc.second);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (process.has("roles")) {
|
2019-03-17 13:48:24 +08:00
|
|
|
|
StatusArray rolesArray = proc.second.get_obj()["roles"].get_array();
|
|
|
|
|
bool storageRole = false;
|
|
|
|
|
bool logRole = false;
|
|
|
|
|
for (StatusObjectReader role : rolesArray) {
|
|
|
|
|
if (role["role"].get_str() == "storage") {
|
|
|
|
|
storageRole = true;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (role["role"].get_str() == "log") {
|
2019-03-17 13:48:24 +08:00
|
|
|
|
logRole = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (storageRole && logRole) {
|
2019-03-17 13:48:24 +08:00
|
|
|
|
foundLogAndStorage = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (process.has("messages")) {
|
|
|
|
|
StatusArray processMessagesArr = process.last().get_array();
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (processMessagesArr.size()) {
|
|
|
|
|
for (StatusObjectReader msg : processMessagesArr) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string desc;
|
|
|
|
|
std::string addr;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (msg.get("description", desc) && desc == "Unable to update cluster file." &&
|
|
|
|
|
process.get("address", addr)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
messagesAddrs.push_back(addr);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (messagesAddrs.size()) {
|
|
|
|
|
outputString += format("\n\n%d FoundationDB processes reported unable to update cluster file:",
|
|
|
|
|
messagesAddrs.size());
|
|
|
|
|
for (auto msg : messagesAddrs) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n " + msg;
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString = outputStringCache;
|
|
|
|
|
outputString += "\n Unable to retrieve workload status";
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-04 05:02:03 +08:00
|
|
|
|
// Backup and DR section
|
|
|
|
|
outputString += "\n\nBackup and DR:";
|
|
|
|
|
|
|
|
|
|
std::map<std::string, std::string> backupTags;
|
2017-11-07 06:00:08 +08:00
|
|
|
|
getBackupDRTags(statusObjCluster, "backup", backupTags);
|
2017-11-04 05:02:03 +08:00
|
|
|
|
|
|
|
|
|
std::map<std::string, std::string> drPrimaryTags;
|
2017-11-07 06:00:08 +08:00
|
|
|
|
getBackupDRTags(statusObjCluster, "dr_backup", drPrimaryTags);
|
2017-11-04 05:02:03 +08:00
|
|
|
|
|
|
|
|
|
std::map<std::string, std::string> drSecondaryTags;
|
2017-11-07 06:00:08 +08:00
|
|
|
|
getBackupDRTags(statusObjCluster, "dr_backup_dest", drSecondaryTags);
|
2017-11-04 05:02:03 +08:00
|
|
|
|
|
2017-11-07 06:00:08 +08:00
|
|
|
|
outputString += format("\n Running backups - %d", backupTags.size());
|
2017-11-04 05:02:03 +08:00
|
|
|
|
outputString += format("\n Running DRs - ");
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (drPrimaryTags.size() == 0 && drSecondaryTags.size() == 0) {
|
2017-11-04 05:02:03 +08:00
|
|
|
|
outputString += format("%d", 0);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
|
|
|
|
if (drPrimaryTags.size() > 0) {
|
2017-11-07 06:00:08 +08:00
|
|
|
|
outputString += format("%d as primary", drPrimaryTags.size());
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (drSecondaryTags.size() > 0) {
|
2017-11-04 05:02:03 +08:00
|
|
|
|
outputString += ", ";
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (drSecondaryTags.size() > 0) {
|
2017-11-07 06:00:08 +08:00
|
|
|
|
outputString += format("%d as secondary", drSecondaryTags.size());
|
2018-06-21 00:21:23 +08:00
|
|
|
|
}
|
2017-11-04 05:02:03 +08:00
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
// status details
|
|
|
|
|
if (level == StatusClient::DETAILED) {
|
2017-11-07 06:00:08 +08:00
|
|
|
|
outputString += logBackupDR("Running backup tags", backupTags);
|
|
|
|
|
outputString += logBackupDR("Running DR tags (as primary)", drPrimaryTags);
|
|
|
|
|
outputString += logBackupDR("Running DR tags (as secondary)", drSecondaryTags);
|
2017-11-04 05:02:03 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n\nProcess performance details:";
|
|
|
|
|
outputStringCache = outputString;
|
|
|
|
|
try {
|
|
|
|
|
// constructs process performance details output
|
2019-04-02 02:40:26 +08:00
|
|
|
|
std::map<NetworkAddress, std::string> workerDetails;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto proc : processesMap.obj()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
StatusObjectReader procObj(proc.second);
|
|
|
|
|
std::string address;
|
|
|
|
|
procObj.get("address", address);
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string line;
|
|
|
|
|
|
2019-04-02 02:40:26 +08:00
|
|
|
|
NetworkAddress parsedAddress;
|
|
|
|
|
try {
|
|
|
|
|
parsedAddress = NetworkAddress::parse(address);
|
2019-06-21 00:29:01 +08:00
|
|
|
|
} catch (Error&) {
|
2019-04-02 02:40:26 +08:00
|
|
|
|
// Groups all invalid IP address/port pair in the end of this detail group.
|
2017-05-26 04:48:44 +08:00
|
|
|
|
line = format(" %-22s (invalid IP address or port)", address.c_str());
|
2019-04-02 02:40:26 +08:00
|
|
|
|
IPAddress::IPAddressStore maxIp;
|
|
|
|
|
for (int i = 0; i < maxIp.size(); ++i) {
|
|
|
|
|
maxIp[i] = std::numeric_limits<std::remove_reference<decltype(maxIp[0])>::type>::max();
|
|
|
|
|
}
|
|
|
|
|
std::string& lastline =
|
|
|
|
|
workerDetails[NetworkAddress(IPAddress(maxIp), std::numeric_limits<uint16_t>::max())];
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (!lastline.empty())
|
|
|
|
|
lastline.append("\n");
|
|
|
|
|
lastline += line;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
try {
|
|
|
|
|
double tx = -1, rx = -1, mCPUUtil = -1;
|
|
|
|
|
int64_t processTotalSize;
|
|
|
|
|
|
|
|
|
|
// Get the machine for this process
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// StatusObjectReader mach = machinesMap[procObj["machine_id"].get_str()];
|
2017-05-26 04:48:44 +08:00
|
|
|
|
StatusObjectReader mach;
|
|
|
|
|
if (machinesMap.get(procObj["machine_id"].get_str(), mach, false)) {
|
|
|
|
|
StatusObjectReader machCPU;
|
|
|
|
|
if (mach.get("cpu", machCPU)) {
|
|
|
|
|
|
|
|
|
|
machCPU.get("logical_core_utilization", mCPUUtil);
|
|
|
|
|
|
|
|
|
|
StatusObjectReader network;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (mach.get("network", network)) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
network.get("megabits_sent.hz", tx);
|
|
|
|
|
network.get("megabits_received.hz", rx);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
procObj.get("memory.used_bytes", processTotalSize);
|
|
|
|
|
|
|
|
|
|
StatusObjectReader procCPUObj;
|
|
|
|
|
procObj.get("cpu", procCPUObj);
|
|
|
|
|
|
|
|
|
|
line = format(" %-22s (", address.c_str());
|
|
|
|
|
|
|
|
|
|
double usageCores;
|
|
|
|
|
if (procCPUObj.get("usage_cores", usageCores))
|
|
|
|
|
line += format("%3.0f%% cpu;", usageCores * 100);
|
|
|
|
|
|
|
|
|
|
line += mCPUUtil != -1 ? format("%3.0f%% machine;", mCPUUtil * 100) : "";
|
|
|
|
|
line += std::min(tx, rx) != -1 ? format("%6.3f Gbps;", std::max(tx, rx) / 1000.0) : "";
|
|
|
|
|
|
|
|
|
|
double diskBusy;
|
|
|
|
|
if (procObj.get("disk.busy", diskBusy))
|
|
|
|
|
line += format("%3.0f%% disk IO;", 100.0 * diskBusy);
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
line += processTotalSize != -1
|
|
|
|
|
? format("%4.1f GB", processTotalSize / (1024.0 * 1024 * 1024))
|
|
|
|
|
: "";
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
double availableBytes;
|
|
|
|
|
if (procObj.get("memory.available_bytes", availableBytes))
|
|
|
|
|
line += format(" / %3.1f GB RAM )", availableBytes / (1024.0 * 1024 * 1024));
|
|
|
|
|
else
|
|
|
|
|
line += " )";
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (procObj.has("messages")) {
|
|
|
|
|
for (StatusObjectReader message : procObj.last().get_array()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string desc;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (message.get("description", desc)) {
|
|
|
|
|
if (message.has("type")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
line += "\n Last logged error: " + desc;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
line += "\n " + desc;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-02 02:40:26 +08:00
|
|
|
|
workerDetails[parsedAddress] = line;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string noMetrics = format(" %-22s (no metrics available)", address.c_str());
|
2019-04-02 02:40:26 +08:00
|
|
|
|
workerDetails[parsedAddress] = noMetrics;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
for (auto w : workerDetails)
|
|
|
|
|
outputString += "\n" + format("%s", w.second.c_str());
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString = outputStringCache;
|
|
|
|
|
outputString += "\n Unable to retrieve process performance details";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!printedCoordinators) {
|
|
|
|
|
printedCoordinators = true;
|
|
|
|
|
outputString += "\n\nCoordination servers:";
|
|
|
|
|
outputString += getCoordinatorsInfoString(statusObj);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// client time
|
|
|
|
|
std::string clientTime = getDateInfoString(statusObjClient, "timestamp");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (clientTime != "") {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
outputString += "\n\nClient time: " + clientTime;
|
|
|
|
|
}
|
2019-03-17 13:48:24 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (processesMap.obj().size() > 1 && isOldMemory) {
|
|
|
|
|
outputString += "\n\nWARNING: type `configure memory' to switch to a safer method of persisting data "
|
|
|
|
|
"on the transaction logs.";
|
2019-03-17 13:48:24 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (processesMap.obj().size() > 9 && foundLogAndStorage) {
|
|
|
|
|
outputString +=
|
|
|
|
|
"\n\nWARNING: A single process is both a transaction log and a storage server.\n For best "
|
|
|
|
|
"performance use dedicated disks for the transaction logs by setting process classes.";
|
2019-03-17 13:48:24 +08:00
|
|
|
|
}
|
|
|
|
|
|
2019-08-30 09:41:34 +08:00
|
|
|
|
if (statusObjCluster.has("data_distribution_disabled")) {
|
2019-08-22 05:44:15 +08:00
|
|
|
|
outputString += "\n\nWARNING: Data distribution is off.";
|
|
|
|
|
} else {
|
2019-08-30 09:41:34 +08:00
|
|
|
|
if (statusObjCluster.has("data_distribution_disabled_for_ss_failures")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
outputString += "\n\nWARNING: Data distribution is currently turned on but disabled for all "
|
|
|
|
|
"storage server failures.";
|
2019-08-22 05:44:15 +08:00
|
|
|
|
}
|
2019-08-30 09:41:34 +08:00
|
|
|
|
if (statusObjCluster.has("data_distribution_disabled_for_rebalance")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
outputString += "\n\nWARNING: Data distribution is currently turned on but shard size balancing is "
|
|
|
|
|
"currently disabled.";
|
2019-08-22 05:44:15 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("%s\n", outputString.c_str());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// status minimal
|
|
|
|
|
else if (level == StatusClient::MINIMAL) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Checking for field exsistence is not necessary here because if a field is missing there is no additional
|
|
|
|
|
// information that we would be able to display if we continued execution. Instead, any missing fields will
|
|
|
|
|
// throw and the catch will display the proper message.
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
// If any of these throw, can't get status because the result makes no sense.
|
|
|
|
|
StatusObjectReader statusObjClient = statusObj["client"].get_obj();
|
|
|
|
|
StatusObjectReader statusObjClientDatabaseStatus = statusObjClient["database_status"].get_obj();
|
|
|
|
|
|
|
|
|
|
bool available = statusObjClientDatabaseStatus["available"].get_bool();
|
|
|
|
|
|
|
|
|
|
// Database unavailable
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!available) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("%s", "The database is unavailable; type `status' for more information.\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
bool healthy = statusObjClientDatabaseStatus["healthy"].get_bool();
|
|
|
|
|
|
|
|
|
|
// Database available without issues
|
|
|
|
|
if (healthy) {
|
|
|
|
|
if (displayDatabaseAvailable) {
|
|
|
|
|
printf("The database is available.\n");
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else { // Database running but with issues
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("The database is available, but has issues (type 'status' for more information).\n");
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("The database is available, but has issues (type 'status' for more information).\n");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool upToDate;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!statusObjClient.get("cluster_file.up_to_date", upToDate) || !upToDate) {
|
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"WARNING: The cluster file is not up to date. Type 'status' for more information.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (std::runtime_error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("Unable to determine database state, type 'status' for more information.\n");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// status JSON
|
|
|
|
|
else if (level == StatusClient::JSON) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("%s\n",
|
|
|
|
|
json_spirit::write_string(json_spirit::mValue(statusObj.obj()),
|
|
|
|
|
json_spirit::Output_options::pretty_print)
|
|
|
|
|
.c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (Error&) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (hideErrorMessages)
|
|
|
|
|
return;
|
|
|
|
|
if (level == StatusClient::MINIMAL) {
|
|
|
|
|
printf("Unable to determine database state, type 'status' for more information.\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (level == StatusClient::JSON) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("Could not retrieve status json.\n\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("Could not retrieve status, type 'status json' for more information.\n");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
int printStatusFromJSON(std::string const& jsonFileName) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
|
|
|
|
json_spirit::mValue value;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
json_spirit::read_string(readFileBytes(jsonFileName, 10000000), value);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
printStatus(value.get_obj(), StatusClient::DETAILED, false, true);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
} catch (std::exception& e) {
|
|
|
|
|
printf("Exception printing status: %s\n", e.what());
|
|
|
|
|
return 1;
|
|
|
|
|
} catch (Error& e) {
|
|
|
|
|
printf("Error printing status: %d %s\n", e.code(), e.what());
|
|
|
|
|
return 2;
|
|
|
|
|
} catch (...) {
|
|
|
|
|
printf("Unknown exception printing status.\n");
|
|
|
|
|
return 3;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-11-13 08:27:55 +08:00
|
|
|
|
// Ask the data distributor to log its team information by writing a fresh
// random value to the trigger key; DD watches this key and reacts to changes.
// Retries via the standard onError loop until the commit succeeds.
ACTOR Future<Void> triggerDDTeamInfoLog(Database db) {
	state ReadYourWritesTransaction tr(db);
	loop {
		try {
			// The trigger key lives in the system keyspace, so both options are required.
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			// A unique value guarantees the write is a visible change each time.
			std::string v = deterministicRandom()->randomUniqueID().toString();
			tr.set(triggerDDTeamInfoPrintKey, v);
			wait(tr.commit());
			printf("Triggered team info logging in data distribution.\n");
			return Void();
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}
|
|
|
|
|
|
2021-06-08 03:54:24 +08:00
|
|
|
|
// Print the list of currently quarantined TSS (testing storage server)
// processes by scanning the quarantine key range in the system keyspace.
ACTOR Future<Void> tssQuarantineList(Database db) {
	state ReadYourWritesTransaction tr(db);
	loop {
		try {
			// Quarantine keys are system keys.
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

			RangeResult result = wait(tr.getRange(tssQuarantineKeys, CLIENT_KNOBS->TOO_MANY));
			// shouldn't have many quarantined TSSes
			ASSERT(!result.more);
			// Trailing "." for an empty list, ":" when entries follow.
			printf("Found %d quarantined TSS processes%s\n", result.size(), result.size() == 0 ? "." : ":");
			for (auto& it : result) {
				printf("  %s\n", decodeTssQuarantineKey(it.key).toString().c_str());
			}
			return Void();
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}
|
|
|
|
|
|
|
|
|
|
// Put a TSS process into quarantine (enable=true) or release it (enable=false).
// Returns true on success; false (with an explanatory message) when the target
// doesn't exist, isn't a TSS, or is already in the requested state.
ACTOR Future<bool> tssQuarantine(Database db, bool enable, UID tssId) {
	state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(db);
	// Mapping from SS id -> paired TSS id, stored in the system keyspace.
	state KeyBackedMap<UID, UID> tssMapDB = KeyBackedMap<UID, UID>(tssMappingKeys.begin);

	loop {
		try {
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

			// Do some validation first to make sure the command is valid
			Optional<Value> serverListValue = wait(tr->get(serverListKeyFor(tssId)));
			if (!serverListValue.present()) {
				printf("No TSS %s found in cluster!\n", tssId.toString().c_str());
				return false;
			}
			// state: ssi is read again after the next wait (for tssPairID).
			state StorageServerInterface ssi = decodeServerListValue(serverListValue.get());
			if (!ssi.isTss()) {
				printf("Cannot quarantine Non-TSS storage ID %s!\n", tssId.toString().c_str());
				return false;
			}

			// Check current quarantine state so enable/disable is idempotent-with-warning.
			Optional<Value> currentQuarantineValue = wait(tr->get(tssQuarantineKeyFor(tssId)));
			if (enable && currentQuarantineValue.present()) {
				printf("TSS %s already in quarantine, doing nothing.\n", tssId.toString().c_str());
				return false;
			} else if (!enable && !currentQuarantineValue.present()) {
				printf("TSS %s is not in quarantine, cannot remove from quarantine!.\n", tssId.toString().c_str());
				return false;
			}

			if (enable) {
				// The quarantine marker's presence is what matters; the value is empty.
				tr->set(tssQuarantineKeyFor(tssId), LiteralStringRef(""));
				// remove server from TSS mapping when quarantine is enabled
				tssMapDB.erase(tr, ssi.tssPairID.get());
			} else {
				tr->clear(tssQuarantineKeyFor(tssId));
			}

			wait(tr->commit());
			break;
		} catch (Error& e) {
			wait(tr->onError(e));
		}
	}
	printf("Successfully %s TSS %s\n", enable ? "quarantined" : "removed", tssId.toString().c_str());
	return true;
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// After `when' seconds have elapsed, print `msg' on stderr. Used to warn the
// user when an interactive operation is taking longer than expected; cancel
// the returned future to suppress the warning.
ACTOR Future<Void> timeWarning(double when, const char* msg) {
	wait(delay(when));
	fprintf(stderr, "%s", msg);

	return Void();
}
|
|
|
|
|
|
2020-01-23 07:41:22 +08:00
|
|
|
|
// After `f' completes, fetch cluster status and print a minimal summary
// (surrounded by blank lines). `displayDatabaseAvailable' controls whether the
// "database is available" line is shown when everything is healthy.
ACTOR Future<Void> checkStatus(Future<Void> f, Database db, bool displayDatabaseAvailable = true) {
	wait(f);
	StatusObject statusObj = wait(StatusClient::statusFetcher(db));
	printf("\n");
	printStatus(statusObj, StatusClient::MINIMAL, displayDatabaseAvailable);
	printf("\n");
	return Void();
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Wrap a future so that a keyboard interrupt (Ctrl-C at the fdbcli prompt)
// cancels the underlying operation and surfaces as operation_cancelled().
// Otherwise the wrapped future's value is passed through unchanged.
ACTOR template <class T>
Future<T> makeInterruptable(Future<T> f) {
	Future<Void> interrupt = LineNoise::onKeyboardInterrupt();
	choose {
		// Normal completion wins: forward the result.
		when(T t = wait(f)) { return t; }
		when(wait(interrupt)) {
			// Interrupted: cancel the in-flight work before throwing.
			f.cancel();
			throw operation_cancelled();
		}
	}
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Commit the given transaction (interruptable via Ctrl-C) and report the
// committed version, or "Nothing to commit" for a read-only/empty transaction.
ACTOR Future<Void> commitTransaction(Reference<ReadYourWritesTransaction> tr) {
	wait(makeInterruptable(tr->commit()));
	auto committedVersion = tr->getCommittedVersion();
	if (committedVersion == invalidVersion) {
		// A transaction with no mutations commits at invalidVersion.
		printf("Nothing to commit\n");
	} else {
		printf("Committed (%" PRId64 ")\n", committedVersion);
	}
	return Void();
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Implements the `configure' CLI command. Parses the token list (optionally
// prefixed with FORCE), supports `configure auto' (which fetches status,
// computes a recommended configuration, shows an old/new comparison table,
// and asks the user to confirm), and applies the change via changeConfig().
// Returns true if an error occurred (so the CLI can set its error state),
// false on success / no-op.
ACTOR Future<bool> configure(Database db,
                             std::vector<StringRef> tokens,
                             Reference<ClusterConnectionFile> ccf,
                             LineNoise* linenoise,
                             Future<Void> warn) {
	state ConfigurationResult result;
	// tokens[0] is the command name itself; options start at index 1.
	state int startToken = 1;
	state bool force = false;
	if (tokens.size() < 2)
		result = ConfigurationResult::NO_OPTIONS_PROVIDED;
	else {
		// Optional FORCE token skips safety checks in changeConfig().
		if (tokens[startToken] == LiteralStringRef("FORCE")) {
			force = true;
			startToken = 2;
		}

		state Optional<ConfigureAutoResult> conf;
		if (tokens[startToken] == LiteralStringRef("auto")) {
			StatusObject s = wait(makeInterruptable(StatusClient::statusFetcher(db)));
			// Cancel the slow-operation warning once status has been fetched.
			if (warn.isValid())
				warn.cancel();

			conf = parseConfig(s);

			if (!conf.get().isValid()) {
				printf("Unable to provide advice for the current configuration.\n");
				return true;
			}

			// noChanges: the recommended (auto) configuration matches what is
			// currently deployed, so there is nothing to apply.
			bool noChanges = conf.get().old_replication == conf.get().auto_replication &&
			                 conf.get().old_logs == conf.get().auto_logs &&
			                 conf.get().old_commit_proxies == conf.get().auto_commit_proxies &&
			                 conf.get().old_grv_proxies == conf.get().auto_grv_proxies &&
			                 conf.get().old_resolvers == conf.get().auto_resolvers &&
			                 conf.get().old_processes_with_transaction == conf.get().auto_processes_with_transaction &&
			                 conf.get().old_machines_with_transaction == conf.get().auto_machines_with_transaction;

			// noDesiredChanges: additionally, nothing was manually pinned away
			// from the desired values, so the current config is also the ideal one.
			bool noDesiredChanges = noChanges && conf.get().old_logs == conf.get().desired_logs &&
			                        conf.get().old_commit_proxies == conf.get().desired_commit_proxies &&
			                        conf.get().old_grv_proxies == conf.get().desired_grv_proxies &&
			                        conf.get().old_resolvers == conf.get().desired_resolvers;

			std::string outputString;

			outputString += "\nYour cluster has:\n\n";
			outputString += format("  processes %d\n", conf.get().processes);
			outputString += format("  machines  %d\n", conf.get().machines);

			if (noDesiredChanges)
				outputString += "\nConfigure recommends keeping your current configuration:\n\n";
			else if (noChanges)
				outputString +=
				    "\nConfigure cannot modify the configuration because some parameters have been set manually:\n\n";
			else
				outputString += "\nConfigure recommends the following changes:\n\n";
			outputString += " ------------------------------------------------------------------- \n";
			outputString += "| parameter                   | old              | new              |\n";
			outputString += " ------------------------------------------------------------------- \n";
			outputString += format("| replication                 | %16s | %16s |\n",
			                       conf.get().old_replication.c_str(),
			                       conf.get().auto_replication.c_str());
			outputString +=
			    format("| logs                        | %16d | %16d |", conf.get().old_logs, conf.get().auto_logs);
			// "(manually set)" annotations show when a parameter is pinned away
			// from the value configure would otherwise choose.
			outputString += conf.get().auto_logs != conf.get().desired_logs
			                    ? format(" (manually set; would be %d)\n", conf.get().desired_logs)
			                    : "\n";
			outputString += format("| commit_proxies              | %16d | %16d |",
			                       conf.get().old_commit_proxies,
			                       conf.get().auto_commit_proxies);
			outputString += conf.get().auto_commit_proxies != conf.get().desired_commit_proxies
			                    ? format(" (manually set; would be %d)\n", conf.get().desired_commit_proxies)
			                    : "\n";
			outputString += format("| grv_proxies                 | %16d | %16d |",
			                       conf.get().old_grv_proxies,
			                       conf.get().auto_grv_proxies);
			outputString += conf.get().auto_grv_proxies != conf.get().desired_grv_proxies
			                    ? format(" (manually set; would be %d)\n", conf.get().desired_grv_proxies)
			                    : "\n";
			outputString += format(
			    "| resolvers                   | %16d | %16d |", conf.get().old_resolvers, conf.get().auto_resolvers);
			outputString += conf.get().auto_resolvers != conf.get().desired_resolvers
			                    ? format(" (manually set; would be %d)\n", conf.get().desired_resolvers)
			                    : "\n";
			outputString += format("| transaction-class processes | %16d | %16d |\n",
			                       conf.get().old_processes_with_transaction,
			                       conf.get().auto_processes_with_transaction);
			outputString += format("| transaction-class machines  | %16d | %16d |\n",
			                       conf.get().old_machines_with_transaction,
			                       conf.get().auto_machines_with_transaction);
			outputString += " ------------------------------------------------------------------- \n\n";

			std::printf("%s", outputString.c_str());

			if (noChanges)
				return false;

			// TODO: disable completion
			// Interactive confirmation before applying the recommended changes.
			Optional<std::string> line = wait(linenoise->read("Would you like to make these changes? [y/n]> "));

			if (!line.present() || (line.get() != "y" && line.get() != "Y")) {
				return false;
			}
		}

		// Apply the configuration; `conf' is only populated in `auto' mode.
		ConfigurationResult r = wait(makeInterruptable(
		    changeConfig(db, std::vector<StringRef>(tokens.begin() + startToken, tokens.end()), conf, force)));
		result = r;
	}

	// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
	// there are various results specific to changeConfig() that we need to report:
	bool ret;
	switch (result) {
	case ConfigurationResult::NO_OPTIONS_PROVIDED:
	case ConfigurationResult::CONFLICTING_OPTIONS:
	case ConfigurationResult::UNKNOWN_OPTION:
	case ConfigurationResult::INCOMPLETE_CONFIGURATION:
		printUsage(LiteralStringRef("configure"));
		ret = true;
		break;
	case ConfigurationResult::INVALID_CONFIGURATION:
		fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
		ret = true;
		break;
	case ConfigurationResult::DATABASE_ALREADY_CREATED:
		fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
		ret = true;
		break;
	case ConfigurationResult::DATABASE_CREATED:
		printf("Database created\n");
		ret = false;
		break;
	case ConfigurationResult::DATABASE_UNAVAILABLE:
		fprintf(stderr, "ERROR: The database is unavailable\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
		fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
		fprintf(stderr,
		        "ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated "
		        "before changing the configuration\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
		fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::REGIONS_CHANGED:
		fprintf(stderr,
		        "ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::NOT_ENOUGH_WORKERS:
		fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::REGION_REPLICATION_MISMATCH:
		fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::DCID_MISSING:
		fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
		ret = true;
		break;
	case ConfigurationResult::SUCCESS:
		printf("Configuration changed\n");
		ret = false;
		break;
	case ConfigurationResult::LOCKED_NOT_NEW:
		fprintf(stderr, "ERROR: `only new databases can be configured as locked`\n");
		ret = true;
		break;
	default:
		ASSERT(false);
		ret = true;
	};
	return ret;
}
|
|
|
|
|
|
2018-11-05 11:53:55 +08:00
|
|
|
|
ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDatabase, bool force) {
|
2018-08-17 08:34:59 +08:00
|
|
|
|
std::string contents(readFileBytes(filePath, 100000));
|
|
|
|
|
json_spirit::mValue config;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!json_spirit::read_string(contents, config)) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Invalid JSON\n");
|
2018-08-17 08:34:59 +08:00
|
|
|
|
return true;
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (config.type() != json_spirit::obj_type) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Configuration file must contain a JSON object\n");
|
2019-08-20 02:28:15 +08:00
|
|
|
|
return true;
|
|
|
|
|
}
|
2018-08-17 08:34:59 +08:00
|
|
|
|
StatusObject configJSON = config.get_obj();
|
|
|
|
|
|
|
|
|
|
json_spirit::mValue schema;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!json_spirit::read_string(JSONSchemas::clusterConfigurationSchema.toString(), schema)) {
|
2018-09-05 13:16:35 +08:00
|
|
|
|
ASSERT(false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::string errorStr;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!schemaMatch(schema.get_obj(), configJSON, errorStr)) {
|
2018-09-05 13:16:35 +08:00
|
|
|
|
printf("%s", errorStr.c_str());
|
2018-08-17 08:34:59 +08:00
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::string configString;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (isNewDatabase) {
|
2018-09-05 13:16:35 +08:00
|
|
|
|
configString = "new";
|
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2020-12-27 13:46:20 +08:00
|
|
|
|
for (const auto& [name, value] : configJSON) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!configString.empty()) {
|
2018-08-17 08:34:59 +08:00
|
|
|
|
configString += " ";
|
|
|
|
|
}
|
2020-12-27 13:46:20 +08:00
|
|
|
|
if (value.type() == json_spirit::int_type) {
|
|
|
|
|
configString += name + ":=" + format("%d", value.get_int());
|
|
|
|
|
} else if (value.type() == json_spirit::str_type) {
|
|
|
|
|
configString += value.get_str();
|
|
|
|
|
} else if (value.type() == json_spirit::array_type) {
|
|
|
|
|
configString +=
|
|
|
|
|
name + "=" +
|
|
|
|
|
json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
|
2018-08-17 08:34:59 +08:00
|
|
|
|
} else {
|
|
|
|
|
printUsage(LiteralStringRef("fileconfigure"));
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 02:52:18 +08:00
|
|
|
|
ConfigurationResult result = wait(makeInterruptable(changeConfig(db, configString, force)));
|
2018-08-17 08:34:59 +08:00
|
|
|
|
// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
|
|
|
|
|
// there are various results specific to changeConfig() that we need to report:
|
|
|
|
|
bool ret;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
switch (result) {
|
2018-08-17 08:34:59 +08:00
|
|
|
|
case ConfigurationResult::NO_OPTIONS_PROVIDED:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: No options provided\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-09-05 13:16:35 +08:00
|
|
|
|
break;
|
2018-08-17 08:34:59 +08:00
|
|
|
|
case ConfigurationResult::CONFLICTING_OPTIONS:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Conflicting options\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-09-05 13:16:35 +08:00
|
|
|
|
break;
|
2018-08-17 08:34:59 +08:00
|
|
|
|
case ConfigurationResult::UNKNOWN_OPTION:
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Unknown option\n"); // This should not be possible because of schema match
|
|
|
|
|
ret = true;
|
2018-09-05 13:16:35 +08:00
|
|
|
|
break;
|
2018-08-17 08:34:59 +08:00
|
|
|
|
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: Must specify both a replication level and a storage engine when creating a new database\n");
|
|
|
|
|
ret = true;
|
2018-09-05 13:16:35 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::INVALID_CONFIGURATION:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-08-17 08:34:59 +08:00
|
|
|
|
break;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
case ConfigurationResult::DATABASE_ALREADY_CREATED:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::DATABASE_CREATED:
|
|
|
|
|
printf("Database created\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
break;
|
2018-11-05 11:53:55 +08:00
|
|
|
|
case ConfigurationResult::DATABASE_UNAVAILABLE:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: The database is unavailable\n");
|
2018-11-05 11:53:55 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-11-05 11:53:55 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
|
2018-11-05 11:53:55 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-11-05 11:53:55 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: When usable_regions > 1, All regions with priority >= 0 must be fully replicated "
|
|
|
|
|
"before changing the configuration\n");
|
2018-11-05 11:53:55 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-11-05 11:53:55 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
|
2018-11-05 11:53:55 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-11-05 11:53:55 +08:00
|
|
|
|
break;
|
2018-11-13 09:40:40 +08:00
|
|
|
|
case ConfigurationResult::REGIONS_CHANGED:
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
|
2018-12-01 10:52:24 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-12-01 10:52:24 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::NOT_ENOUGH_WORKERS:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
|
2018-12-01 10:52:24 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2019-05-01 00:34:48 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::REGION_REPLICATION_MISMATCH:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
|
2020-05-28 06:27:47 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2019-05-01 00:34:48 +08:00
|
|
|
|
break;
|
|
|
|
|
case ConfigurationResult::DCID_MISSING:
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
|
2020-05-28 06:27:47 +08:00
|
|
|
|
printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2018-11-13 09:40:40 +08:00
|
|
|
|
break;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
case ConfigurationResult::SUCCESS:
|
|
|
|
|
printf("Configuration changed\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
ASSERT(false);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ret = true;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
};
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// FIXME: Factor address parsing from coordinators, include, exclude
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// cli `coordinators` command: changes the cluster's coordination servers and/or the
// cluster description. Forms handled by the parsing below:
//   coordinators description=<NAME>             -> rename only (noQuorumChange)
//   coordinators auto                           -> automatic coordinator selection
//   coordinators <ADDRESS>... [description=X]   -> explicit coordinator list
// Returns true if the command failed (an error was printed), false on success.
// NOTE(review): isClusterTLS is not referenced in this body — confirm whether callers rely on it.
ACTOR Future<bool> coordinators(Database db, std::vector<StringRef> tokens, bool isClusterTLS) {
	state StringRef setName;
	StringRef nameTokenBegin = LiteralStringRef("description=");
	// Pull the optional description=<NAME> token out of `tokens`, compacting the vector in place
	// so the remaining tokens can be interpreted as addresses (or `auto`).
	for (auto tok = tokens.begin() + 1; tok != tokens.end(); ++tok)
		if (tok->startsWith(nameTokenBegin)) {
			setName = tok->substr(nameTokenBegin.size());
			std::copy(tok + 1, tokens.end(), tok);
			tokens.resize(tokens.size() - 1);
			break;
		}

	// `auto` triggers automatic selection only when it is the sole remaining argument.
	bool automatic = tokens.size() == 2 && tokens[1] == LiteralStringRef("auto");

	state Reference<IQuorumChange> change;
	if (tokens.size() == 1 && setName.size()) {
		// Only a description was given: keep the current coordinators, change the name.
		change = noQuorumChange();
	} else if (automatic) {
		// Automatic quorum change
		change = autoQuorumChange();
	} else {
		// Explicit list of coordinator addresses; parse and de-duplicate them.
		state std::set<NetworkAddress> addresses;
		state std::vector<StringRef>::iterator t;
		for (t = tokens.begin() + 1; t != tokens.end(); ++t) {
			try {
				// SOMEDAY: Check for keywords
				auto const& addr = NetworkAddress::parse(t->toString());
				if (addresses.count(addr)) {
					fprintf(stderr, "ERROR: passed redundant coordinators: `%s'\n", addr.toString().c_str());
					return true;
				}
				addresses.insert(addr);
			} catch (Error& e) {
				if (e.code() == error_code_connection_string_invalid) {
					fprintf(stderr, "ERROR: '%s' is not a valid network endpoint address\n", t->toString().c_str());
					return true;
				}
				throw;
			}
		}

		std::vector<NetworkAddress> addressesVec(addresses.begin(), addresses.end());
		change = specifiedQuorumChange(addressesVec);
	}
	// A description can be combined with any of the quorum-change kinds above.
	if (setName.size())
		change = nameQuorumChange(setName.toString(), change);

	CoordinatorsResult r = wait(makeInterruptable(changeQuorum(db, change)));

	// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
	// there are various results specific to changeConfig() that we need to report:
	bool err = true;
	switch (r) {
	case CoordinatorsResult::INVALID_NETWORK_ADDRESSES:
		fprintf(stderr, "ERROR: The specified network addresses are invalid\n");
		break;
	case CoordinatorsResult::SAME_NETWORK_ADDRESSES:
		printf("No change (existing configuration satisfies request)\n");
		err = false;
		break;
	case CoordinatorsResult::NOT_COORDINATORS:
		fprintf(stderr, "ERROR: Coordination servers are not running on the specified network addresses\n");
		break;
	case CoordinatorsResult::DATABASE_UNREACHABLE:
		fprintf(stderr, "ERROR: Database unreachable\n");
		break;
	case CoordinatorsResult::BAD_DATABASE_STATE:
		fprintf(stderr,
		        "ERROR: The database is in an unexpected state from which changing coordinators might be unsafe\n");
		break;
	case CoordinatorsResult::COORDINATOR_UNREACHABLE:
		fprintf(stderr, "ERROR: One of the specified coordinators is unreachable\n");
		break;
	case CoordinatorsResult::SUCCESS:
		printf("Coordination state changed\n");
		err = false;
		break;
	case CoordinatorsResult::NOT_ENOUGH_MACHINES:
		fprintf(stderr, "ERROR: Too few fdbserver machines to provide coordination at the current redundancy level\n");
		break;
	default:
		ASSERT(false);
	};
	return err;
}
|
|
|
|
|
|
2021-06-17 15:27:26 +08:00
|
|
|
|
// Includes the servers that could be IP addresses or localities back to the cluster.
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ACTOR Future<bool> include(Database db, std::vector<StringRef> tokens) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::vector<AddressExclusion> addresses;
|
2021-05-19 14:48:04 +08:00
|
|
|
|
state std::vector<std::string> localities;
|
|
|
|
|
state bool failed = false;
|
|
|
|
|
state bool all = false;
|
2019-10-24 02:05:48 +08:00
|
|
|
|
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
|
|
|
|
|
if (*t == LiteralStringRef("all")) {
|
|
|
|
|
all = true;
|
|
|
|
|
} else if (*t == LiteralStringRef("failed")) {
|
|
|
|
|
failed = true;
|
2021-06-26 04:05:32 +08:00
|
|
|
|
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) && t->toString().find(':') != std::string::npos) {
|
2021-06-24 04:55:17 +08:00
|
|
|
|
// if the token starts with 'locality_' prefix.
|
2021-05-19 14:48:04 +08:00
|
|
|
|
localities.push_back(t->toString());
|
2019-10-24 02:05:48 +08:00
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
auto a = AddressExclusion::parse(*t);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (!a.isValid()) {
|
2021-06-05 06:23:04 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: '%s' is neither a valid network endpoint address nor a locality\n",
|
|
|
|
|
t->toString().c_str());
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (t->toString().find(":tls") != std::string::npos)
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf(" Do not include the `:tls' suffix when naming a process\n");
|
|
|
|
|
return true;
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
addresses.push_back(a);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
2019-10-24 02:05:48 +08:00
|
|
|
|
if (all) {
|
2019-10-25 04:05:28 +08:00
|
|
|
|
std::vector<AddressExclusion> includeAll;
|
2019-10-24 02:12:10 +08:00
|
|
|
|
includeAll.push_back(AddressExclusion());
|
|
|
|
|
wait(makeInterruptable(includeServers(db, includeAll, failed)));
|
2021-06-17 15:27:26 +08:00
|
|
|
|
wait(makeInterruptable(includeLocalities(db, localities, failed, all)));
|
2019-10-24 02:05:48 +08:00
|
|
|
|
} else {
|
2021-05-19 14:48:04 +08:00
|
|
|
|
if (!addresses.empty()) {
|
|
|
|
|
wait(makeInterruptable(includeServers(db, addresses, failed)));
|
|
|
|
|
}
|
|
|
|
|
if (!localities.empty()) {
|
2021-06-10 08:04:05 +08:00
|
|
|
|
// includes the servers that belong to given localities.
|
2021-06-17 15:27:26 +08:00
|
|
|
|
wait(makeInterruptable(includeLocalities(db, localities, failed, all)));
|
2021-05-19 14:48:04 +08:00
|
|
|
|
}
|
2019-10-24 02:05:48 +08:00
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
return false;
|
|
|
|
|
};
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// cli `exclude` command.
// With no arguments: lists the addresses/localities currently excluded from the database.
// With arguments: excludes the given IP[:PORT] addresses and/or `locality_...:value` tokens,
// first running safety and free-space checks based on cluster status (skipped with FORCE).
// `failed` marks the exclusion as permanent; `no_wait` returns before data movement finishes.
// `ccf` is used at the end to warn when an excluded address is a coordinator.
// Returns true if the command failed (an error was printed), false on success.
ACTOR Future<bool> exclude(Database db,
                           std::vector<StringRef> tokens,
                           Reference<ClusterConnectionFile> ccf,
                           Future<Void> warn) {
	if (tokens.size() <= 1) {
		// No arguments: report the current exclusion list and return.
		state Future<vector<AddressExclusion>> fexclAddresses = makeInterruptable(getExcludedServers(db));
		state Future<vector<std::string>> fexclLocalities = makeInterruptable(getExcludedLocalities(db));

		wait(success(fexclAddresses) && success(fexclLocalities));
		vector<AddressExclusion> exclAddresses = fexclAddresses.get();
		vector<std::string> exclLocalities = fexclLocalities.get();

		if (!exclAddresses.size() && !exclLocalities.size()) {
			printf("There are currently no servers or localities excluded from the database.\n"
			       "To learn how to exclude a server, type `help exclude'.\n");
			return false;
		}

		printf("There are currently %zu servers or localities being excluded from the database:\n",
		       exclAddresses.size() + exclLocalities.size());
		for (const auto& e : exclAddresses)
			printf("  %s\n", e.toString().c_str());
		for (const auto& e : exclLocalities)
			printf("  %s\n", e.c_str());

		printf("To find out whether it is safe to remove one or more of these\n"
		       "servers from the cluster, type `exclude <addresses>'.\n"
		       "To return one of these servers to the cluster, type `include <addresses>'.\n");

		return false;
	} else {
		// exclusionVector/exclusionSet hold the resolved addresses (including those expanded
		// from locality tokens); exclusionAddresses holds only explicitly given addresses;
		// exclusionLocalities holds the raw locality tokens.
		state std::vector<AddressExclusion> exclusionVector;
		state std::set<AddressExclusion> exclusionSet;
		state std::vector<AddressExclusion> exclusionAddresses;
		state std::unordered_set<std::string> exclusionLocalities;
		state std::vector<std::string> noMatchLocalities;
		state bool force = false;
		state bool waitForAllExcluded = true;
		state bool markFailed = false;
		state std::vector<ProcessData> workers = wait(makeInterruptable(getWorkers(db)));
		for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
			if (*t == LiteralStringRef("FORCE")) {
				force = true;
			} else if (*t == LiteralStringRef("no_wait")) {
				waitForAllExcluded = false;
			} else if (*t == LiteralStringRef("failed")) {
				markFailed = true;
			} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) &&
			           t->toString().find(':') != std::string::npos) {
				// Locality token: resolve it to the worker addresses that match it.
				std::set<AddressExclusion> localityAddresses = getAddressesByLocality(workers, t->toString());
				if (localityAddresses.empty()) {
					noMatchLocalities.push_back(t->toString());
				} else {
					// add all the server ipaddresses that belong to the given localities to the exclusionSet.
					exclusionVector.insert(exclusionVector.end(), localityAddresses.begin(), localityAddresses.end());
					exclusionSet.insert(localityAddresses.begin(), localityAddresses.end());
				}
				exclusionLocalities.insert(t->toString());
			} else {
				auto a = AddressExclusion::parse(*t);
				if (!a.isValid()) {
					fprintf(stderr,
					        "ERROR: '%s' is neither a valid network endpoint address nor a locality\n",
					        t->toString().c_str());
					if (t->toString().find(":tls") != std::string::npos)
						printf("  Do not include the `:tls' suffix when naming a process\n");
					return true;
				}
				exclusionVector.push_back(a);
				exclusionSet.insert(a);
				exclusionAddresses.push_back(a);
			}
		}

		if (exclusionAddresses.empty() && exclusionLocalities.empty()) {
			fprintf(stderr, "ERROR: At least one valid network endpoint address or a locality is not provided\n");
			return true;
		}

		if (!force) {
			if (markFailed) {
				// A `failed` exclusion is permanent; verify it cannot break a storage team
				// or a coordinator majority. Any error (other than cancellation) is treated
				// as "not safe".
				state bool safe;
				try {
					bool _safe = wait(makeInterruptable(checkSafeExclusions(db, exclusionVector)));
					safe = _safe;
				} catch (Error& e) {
					if (e.code() == error_code_actor_cancelled)
						throw;
					TraceEvent("CheckSafeExclusionsError").error(e);
					safe = false;
				}
				if (!safe) {
					std::string errorStr =
					    "ERROR: It is unsafe to exclude the specified servers at this time.\n"
					    "Please check that this exclusion does not bring down an entire storage team.\n"
					    "Please also ensure that the exclusion will keep a majority of coordinators alive.\n"
					    "You may add more storage processes or coordinators to make the operation safe.\n"
					    "Type `exclude FORCE failed <ADDRESS...>' to exclude without performing safety checks.\n";
					printf("%s", errorStr.c_str());
					return true;
				}
			}
			// Free-space check: estimate the worst free-space ratio over the storage servers
			// that would remain, using the machine-readable cluster status.
			StatusObject status = wait(makeInterruptable(StatusClient::statusFetcher(db)));

			state std::string errorString =
			    "ERROR: Could not calculate the impact of this exclude on the total free space in the cluster.\n"
			    "Please try the exclude again in 30 seconds.\n"
			    "Type `exclude FORCE <ADDRESS...>' to exclude without checking free space.\n";

			StatusObjectReader statusObj(status);

			StatusObjectReader statusObjCluster;
			if (!statusObj.get("cluster", statusObjCluster)) {
				fprintf(stderr, "%s", errorString.c_str());
				return true;
			}

			StatusObjectReader processesMap;
			if (!statusObjCluster.get("processes", processesMap)) {
				fprintf(stderr, "%s", errorString.c_str());
				return true;
			}

			state int ssTotalCount = 0;
			state int ssExcludedCount = 0;
			state double worstFreeSpaceRatio = 1.0;
			try {
				for (auto proc : processesMap.obj()) {
					bool storageServer = false;
					StatusArray rolesArray = proc.second.get_obj()["roles"].get_array();
					for (StatusObjectReader role : rolesArray) {
						if (role["role"].get_str() == "storage") {
							storageServer = true;
							break;
						}
					}
					// Skip non-storage servers in free space calculation
					if (!storageServer)
						continue;

					StatusObjectReader process(proc.second);
					std::string addrStr;
					if (!process.get("address", addrStr)) {
						fprintf(stderr, "%s", errorString.c_str());
						return true;
					}
					NetworkAddress addr = NetworkAddress::parse(addrStr);
					// Count as excluded if status already reports it excluded or this command excludes it.
					bool excluded =
					    (process.has("excluded") && process.last().get_bool()) || addressExcluded(exclusionSet, addr);
					ssTotalCount++;
					if (excluded)
						ssExcludedCount++;

					if (!excluded) {
						StatusObjectReader disk;
						if (!process.get("disk", disk)) {
							fprintf(stderr, "%s", errorString.c_str());
							return true;
						}

						int64_t total_bytes;
						if (!disk.get("total_bytes", total_bytes)) {
							fprintf(stderr, "%s", errorString.c_str());
							return true;
						}

						int64_t free_bytes;
						if (!disk.get("free_bytes", free_bytes)) {
							fprintf(stderr, "%s", errorString.c_str());
							return true;
						}

						worstFreeSpaceRatio = std::min(worstFreeSpaceRatio, double(free_bytes) / total_bytes);
					}
				}
			} catch (...) // std::exception
			{
				// Status JSON had an unexpected shape (e.g. bad get_obj()/get_array() access).
				fprintf(stderr, "%s", errorString.c_str());
				return true;
			}

			// Refuse when everything would be excluded, or when the projected load on the
			// remaining servers would push used space above ~90%.
			if (ssExcludedCount == ssTotalCount ||
			    (1 - worstFreeSpaceRatio) * ssTotalCount / (ssTotalCount - ssExcludedCount) > 0.9) {
				fprintf(stderr,
				        "ERROR: This exclude may cause the total free space in the cluster to drop below 10%%.\n"
				        "Type `exclude FORCE <ADDRESS...>' to exclude without checking free space.\n");
				return true;
			}
		}

		// Checks passed (or FORCE): perform the actual exclusions.
		if (!exclusionAddresses.empty()) {
			wait(makeInterruptable(excludeServers(db, exclusionAddresses, markFailed)));
		}
		if (!exclusionLocalities.empty()) {
			wait(makeInterruptable(excludeLocalities(db, exclusionLocalities, markFailed)));
		}

		if (waitForAllExcluded) {
			printf("Waiting for state to be removed from all excluded servers. This may take a while.\n");
			printf("(Interrupting this wait with CTRL+C will not cancel the data movement.)\n");
		}

		if (warn.isValid())
			warn.cancel();

		// Wait for (or, with no_wait, just sample) the set of servers still holding data.
		state std::set<NetworkAddress> notExcludedServers =
		    wait(makeInterruptable(checkForExcludingServers(db, exclusionVector, waitForAllExcluded)));
		std::map<IPAddress, std::set<uint16_t>> workerPorts;
		for (auto addr : workers)
			workerPorts[addr.address.ip].insert(addr.address.port);

		// Print a list of all excluded addresses that don't have a corresponding worker
		std::set<AddressExclusion> absentExclusions;
		for (const auto& addr : exclusionVector) {
			auto worker = workerPorts.find(addr.ip);
			if (worker == workerPorts.end())
				absentExclusions.insert(addr);
			else if (addr.port > 0 && worker->second.count(addr.port) == 0)
				absentExclusions.insert(addr);
		}

		// Per-exclusion report: missing from cluster / still in progress / fully excluded.
		for (const auto& exclusion : exclusionVector) {
			if (absentExclusions.find(exclusion) != absentExclusions.end()) {
				if (exclusion.port == 0) {
					fprintf(stderr,
					        "  %s(Whole machine)  ---- WARNING: Missing from cluster!Be sure that you excluded the "
					        "correct machines before removing them from the cluster!\n",
					        exclusion.ip.toString().c_str());
				} else {
					fprintf(stderr,
					        "  %s  ---- WARNING: Missing from cluster! Be sure that you excluded the correct processes "
					        "before removing them from the cluster!\n",
					        exclusion.toString().c_str());
				}
			} else if (std::any_of(notExcludedServers.begin(), notExcludedServers.end(), [&](const NetworkAddress& a) {
				           return addressExcluded({ exclusion }, a);
			           })) {
				if (exclusion.port == 0) {
					fprintf(stderr,
					        "  %s(Whole machine)  ---- WARNING: Exclusion in progress! It is not safe to remove this "
					        "machine from the cluster\n",
					        exclusion.ip.toString().c_str());
				} else {
					fprintf(stderr,
					        "  %s  ---- WARNING: Exclusion in progress! It is not safe to remove this process from the "
					        "cluster\n",
					        exclusion.toString().c_str());
				}
			} else {
				if (exclusion.port == 0) {
					printf("  %s(Whole machine)  ---- Successfully excluded. It is now safe to remove this machine "
					       "from the cluster.\n",
					       exclusion.ip.toString().c_str());
				} else {
					printf(
					    "  %s  ---- Successfully excluded. It is now safe to remove this process from the cluster.\n",
					    exclusion.toString().c_str());
				}
			}
		}

		for (const auto& locality : noMatchLocalities) {
			fprintf(
			    stderr,
			    "  %s ---- WARNING: Currently no servers found with this locality match! Be sure that you excluded "
			    "the correct locality.\n",
			    locality.c_str());
		}

		// Warn if any excluded address is one of the cluster's coordinators.
		bool foundCoordinator = false;
		auto ccs = ClusterConnectionFile(ccf->getFilename()).getConnectionString();
		for (const auto& c : ccs.coordinators()) {
			if (std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip, c.port)) ||
			    std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip))) {
				fprintf(stderr, "WARNING: %s is a coordinator!\n", c.toString().c_str());
				foundCoordinator = true;
			}
		}
		if (foundCoordinator)
			printf("Type `help coordinators' for information on how to change the\n"
			       "cluster's coordination servers before removing them.\n");

		return false;
	}
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// cli `snapshot` command: joins tokens[1..] into a single space-separated command string,
// generates a fresh snapshot UID, and hands both to mgmtSnapCreate.
// Returns true if the snapshot failed (an error was printed), false on success.
ACTOR Future<bool> createSnapshot(Database db, std::vector<StringRef> tokens) {
	state Standalone<StringRef> snapCmd;
	state UID snapUID = deterministicRandom()->randomUniqueID();
	// Build the command line: a single space before every token after the first.
	for (int idx = 1; idx < tokens.size(); idx++) {
		if (idx > 1) {
			snapCmd = snapCmd.withSuffix(LiteralStringRef(" "));
		}
		snapCmd = snapCmd.withSuffix(tokens[idx]);
	}
	try {
		wait(makeInterruptable(mgmtSnapCreate(db, snapCmd, snapUID)));
		printf("Snapshot command succeeded with UID %s\n", snapUID.toString().c_str());
	} catch (Error& e) {
		fprintf(stderr,
		        "Snapshot command failed %d (%s)."
		        " Please cleanup any instance level snapshots created with UID %s.\n",
		        e.code(),
		        e.what(),
		        snapUID.toString().c_str());
		return true;
	}
	return false;
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Returns the transaction a cli command should operate on: the ongoing one when inside an
// explicit `begin ... commit` block, otherwise a freshly created transaction (with the
// current cli options applied). `tr` is updated in place when a new transaction is made.
Reference<ReadYourWritesTransaction> getTransaction(Database db,
                                                    Reference<ReadYourWritesTransaction>& tr,
                                                    FdbOptions* options,
                                                    bool intrans) {
	const bool reuseOngoing = tr && intrans;
	if (!reuseOngoing) {
		tr = makeReference<ReadYourWritesTransaction>(db);
		options->apply(tr);
	}
	return tr;
}
|
|
|
|
|
|
2021-05-11 03:29:46 +08:00
|
|
|
|
// TODO: Update the function to get rid of Database and ReadYourWritesTransaction after refactoring
// The original ReadYourWritesTransaction handle "tr" is needed as some commands can be called inside a
// transaction and "tr" holds the pointer to the ongoing transaction object. As it's not easy to get rid of "tr" in
// one shot and we are refactoring the code to use Reference<ITransaction> (tr2), we need to let "tr2" point to the same
// underlying transaction like "tr". Thus every time we need to use "tr2", we first update "tr" and let "tr2" point to
// "tr". "tr2" always has the same lifetime as "tr".
Reference<ITransaction> getTransaction(Database db,
                                       Reference<ReadYourWritesTransaction>& tr,
                                       Reference<ITransaction>& tr2,
                                       FdbOptions* options,
                                       bool intrans) {
	// Update "tr" to point to a brand new transaction object when it's not initialized or "intrans" flag is "false",
	// which indicates we need a new transaction object
	if (!tr || !intrans) {
		tr = makeReference<ReadYourWritesTransaction>(db);
		options->apply(tr);
	}
	// Wrap the native transaction so callers of the general fdb interface share the same underlying state as "tr".
	tr2 = Reference<ITransaction>(new ThreadSafeTransaction(tr.getPtr()));
	return tr2;
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Builds one completion entry by joining the already-typed prefix "base" with
// the candidate token "name", with a trailing space so typing can continue.
std::string newCompletion(const char* base, const char* name) {
	std::string completion(base);
	completion += name;
	completion += ' ';
	return completion;
}
|
|
|
|
|
|
2020-05-15 07:04:03 +08:00
|
|
|
|
void compGenerator(const char* text, bool help, std::vector<std::string>& lc) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::map<std::string, CommandHelp>::const_iterator iter;
|
|
|
|
|
int len = strlen(text);
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
const char* helpExtra[] = { "escaping", "options", nullptr };
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
const char** he = helpExtra;
|
|
|
|
|
|
|
|
|
|
for (auto iter = helpMap.begin(); iter != helpMap.end(); ++iter) {
|
|
|
|
|
const char* name = (*iter).first.c_str();
|
|
|
|
|
if (!strncmp(name, text, len)) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
lc.push_back(newCompletion(help ? "help " : "", name));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (help) {
|
|
|
|
|
while (*he) {
|
|
|
|
|
const char* name = *he;
|
|
|
|
|
he++;
|
|
|
|
|
if (!strncmp(name, text, len))
|
2021-03-11 02:06:03 +08:00
|
|
|
|
lc.push_back(newCompletion("help ", name));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-15 07:04:03 +08:00
|
|
|
|
// Command-name completion: candidates without the "help " prefix.
void cmdGenerator(const char* text, std::vector<std::string>& lc) {
	compGenerator(text, /*help=*/false, lc);
}
|
|
|
|
|
|
2020-05-15 07:04:03 +08:00
|
|
|
|
// Help-topic completion: candidates prefixed with "help ".
void helpGenerator(const char* text, std::vector<std::string>& lc) {
	compGenerator(text, /*help=*/true, lc);
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void optionGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
int len = strlen(text);
|
|
|
|
|
|
|
|
|
|
for (auto iter = validOptions.begin(); iter != validOptions.end(); ++iter) {
|
|
|
|
|
const char* name = (*iter).c_str();
|
|
|
|
|
if (!strncmp(name, text, len)) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
lc.push_back(newCompletion(line, name));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void arrayGenerator(const char* text, const char* line, const char** options, std::vector<std::string>& lc) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
const char** iter = options;
|
|
|
|
|
int len = strlen(text);
|
|
|
|
|
|
|
|
|
|
while (*iter) {
|
|
|
|
|
const char* name = *iter;
|
|
|
|
|
iter++;
|
|
|
|
|
if (!strncmp(name, text, len)) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
lc.push_back(newCompletion(line, name));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void onOffGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
|
|
|
|
|
const char* opts[] = { "on", "off", nullptr };
|
2020-05-15 07:04:03 +08:00
|
|
|
|
arrayGenerator(text, line, opts, lc);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void configureGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
|
2020-09-11 08:44:15 +08:00
|
|
|
|
const char* opts[] = { "new",
|
|
|
|
|
"single",
|
|
|
|
|
"double",
|
|
|
|
|
"triple",
|
|
|
|
|
"three_data_hall",
|
|
|
|
|
"three_datacenter",
|
|
|
|
|
"ssd",
|
|
|
|
|
"ssd-1",
|
|
|
|
|
"ssd-2",
|
|
|
|
|
"memory",
|
|
|
|
|
"memory-1",
|
|
|
|
|
"memory-2",
|
|
|
|
|
"memory-radixtree-beta",
|
|
|
|
|
"commit_proxies=",
|
|
|
|
|
"grv_proxies=",
|
|
|
|
|
"logs=",
|
|
|
|
|
"resolvers=",
|
2021-05-11 08:05:08 +08:00
|
|
|
|
"perpetual_storage_wiggle=",
|
2020-09-11 08:44:15 +08:00
|
|
|
|
nullptr };
|
2020-05-15 07:04:03 +08:00
|
|
|
|
arrayGenerator(text, line, opts, lc);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void statusGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
|
|
|
|
|
const char* opts[] = { "minimal", "details", "json", nullptr };
|
2020-05-15 07:04:03 +08:00
|
|
|
|
arrayGenerator(text, line, opts, lc);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
void killGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
|
|
|
|
|
const char* opts[] = { "all", "list", nullptr };
|
2020-05-15 07:04:03 +08:00
|
|
|
|
arrayGenerator(text, line, opts, lc);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Tab-completion for the "throttle" command. "tokens" is the full token list
// typed so far; which candidate set applies depends on both the subcommand
// (tokens[1]) and how many arguments have already been entered.
void throttleGenerator(const char* text,
                       const char* line,
                       std::vector<std::string>& lc,
                       std::vector<StringRef> const& tokens) {
	if (tokens.size() == 1) {
		// Only "throttle" typed so far: offer the subcommands.
		const char* opts[] = { "on tag", "off", "enable auto", "disable auto", "list", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "on")) {
		if (tokens.size() == 2) {
			// "throttle on" must be followed by "tag".
			const char* opts[] = { "tag", nullptr };
			arrayGenerator(text, line, opts, lc);
		} else if (tokens.size() == 6) {
			// Final positional argument of "throttle on tag <TAG> <RATE> <DURATION>": the priority.
			const char* opts[] = { "default", "immediate", "batch", nullptr };
			arrayGenerator(text, line, opts, lc);
		}
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "off") && !tokencmp(tokens[tokens.size() - 1], "tag")) {
		// "throttle off" filters; skipped when the previous token is "tag",
		// because then a free-form tag name (not a keyword) is expected next.
		const char* opts[] = { "all", "auto", "manual", "tag", "default", "immediate", "batch", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() == 2 && (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable"))) {
		// "throttle enable|disable" only takes "auto".
		const char* opts[] = { "auto", nullptr };
		arrayGenerator(text, line, opts, lc);
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "list")) {
		if (tokens.size() == 2) {
			// Which set of throttles to list.
			const char* opts[] = { "throttled", "recommended", "all", nullptr };
			arrayGenerator(text, line, opts, lc);
		} else if (tokens.size() == 3) {
			// Optional result-count limit placeholder.
			const char* opts[] = { "LIMITS", nullptr };
			arrayGenerator(text, line, opts, lc);
		}
	}
}
|
|
|
|
|
|
|
|
|
|
// Top-level tab-completion entry point: parses the current input line "text"
// and appends the matching completion strings to "lc". Dispatches to the
// per-command generators based on the first token.
void fdbcliCompCmd(std::string const& text, std::vector<std::string>& lc) {
	bool err, partial;
	std::string whole_line = text;
	auto parsed = parseLine(whole_line, err, partial);
	if (err || partial) // If there was an error, or we are partially through a quoted sequence
		return;

	// Only the last semicolon-separated command on the line is completed.
	auto tokens = parsed.back();
	int count = tokens.size();

	// for(int i = 0; i < count; i++) {
	//	printf("Token (%d): `%s'\n", i, tokens[i].toString().c_str());
	// }

	// ntext: the partial token being completed; base_input: everything before it.
	std::string ntext = "";
	std::string base_input = text;

	// If there is a token and the input does not end in a space
	if (count && text.size() > 0 && text[text.size() - 1] != ' ') {
		count--; // Ignore the last token for purposes of later code
		ntext = tokens.back().toString();
		base_input = whole_line.substr(0, whole_line.rfind(ntext));
	}

	// printf("final text (%d tokens): `%s' & `%s'\n", count, base_input.c_str(), ntext.c_str());

	// No complete tokens yet: complete the command name itself.
	if (!count) {
		cmdGenerator(ntext.c_str(), lc);
		return;
	}

	if (tokencmp(tokens[0], "help") && count == 1) {
		helpGenerator(ntext.c_str(), lc);
		return;
	}

	if (tokencmp(tokens[0], "option")) {
		// First argument is on/off, second is the option name.
		if (count == 1)
			onOffGenerator(ntext.c_str(), base_input.c_str(), lc);
		if (count == 2)
			optionGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "writemode") && count == 1) {
		onOffGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "configure")) {
		configureGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "status") && count == 1) {
		statusGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "kill") && count == 1) {
		killGenerator(ntext.c_str(), base_input.c_str(), lc);
	}

	if (tokencmp(tokens[0], "throttle")) {
		// throttle completion depends on all tokens typed so far, not just the count.
		throttleGenerator(ntext.c_str(), base_input.c_str(), lc, tokens);
	}
}
|
|
|
|
|
|
2020-05-15 07:04:03 +08:00
|
|
|
|
// Produces the inline hint text (placeholders shown after the cursor) for the
// "throttle" command. "inArgument" is true while the user is mid-token. An
// empty vector means no hint should be shown.
std::vector<const char*> throttleHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
	if (tokens.size() == 1) {
		return { "<on|off|enable auto|disable auto|list>", "[ARGS]" };
	} else if (tokencmp(tokens[1], "on")) {
		// Full positional argument list for "throttle on".
		std::vector<const char*> opts = { "tag", "<TAG>", "[RATE]", "[DURATION]", "[default|immediate|batch]" };
		if (tokens.size() == 2) {
			return opts;
		} else if (((tokens.size() == 3 && inArgument) || tokencmp(tokens[2], "tag")) && tokens.size() < 7) {
			// Drop the hints already satisfied by typed arguments; tokens.size() - 2
			// is how many of the positional arguments have been consumed.
			return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
		}
	} else if (tokencmp(tokens[1], "off")) {
		if (tokencmp(tokens[tokens.size() - 1], "tag")) {
			// "tag" keyword just typed: a tag name must follow.
			return { "<TAG>" };
		} else {
			// Scan the filters typed so far so already-supplied ones are not re-hinted.
			bool hasType = false;
			bool hasTag = false;
			bool hasPriority = false;
			for (int i = 2; i < tokens.size(); ++i) {
				if (tokencmp(tokens[i], "all") || tokencmp(tokens[i], "auto") || tokencmp(tokens[i], "manual")) {
					hasType = true;
				} else if (tokencmp(tokens[i], "default") || tokencmp(tokens[i], "immediate") ||
				           tokencmp(tokens[i], "batch")) {
					hasPriority = true;
				} else if (tokencmp(tokens[i], "tag")) {
					hasTag = true;
					++i; // skip the tag name that follows the "tag" keyword
				} else {
					// Unrecognized token: no hint.
					return {};
				}
			}

			std::vector<const char*> options;
			if (!hasType) {
				options.push_back("[all|auto|manual]");
			}
			if (!hasTag) {
				options.push_back("[tag <TAG>]");
			}
			if (!hasPriority) {
				options.push_back("[default|immediate|batch]");
			}

			return options;
		}
	} else if ((tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) && tokens.size() == 2) {
		return { "auto" };
	} else if (tokens.size() >= 2 && tokencmp(tokens[1], "list")) {
		if (tokens.size() == 2) {
			return { "[throttled|recommended|all]", "[LIMITS]" };
		} else if (tokens.size() == 3 && (tokencmp(tokens[2], "throttled") || tokencmp(tokens[2], "recommended") ||
		                                  tokencmp(tokens[2], "all"))) {
			return { "[LIMITS]" };
		}
	} else if (tokens.size() == 2 && inArgument) {
		return { "[ARGS]" };
	}

	return std::vector<const char*>();
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
// Prints "errMsg" to stdout and records the attempted command together with
// the error in the trace log, keyed by the command's correlation id "randomID".
void LogCommand(std::string line, UID randomID, std::string errMsg) {
	printf("%s\n", errMsg.c_str());
	TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("Error", errMsg);
}
|
|
|
|
|
|
|
|
|
|
struct CLIOptions {
|
|
|
|
|
std::string program_name;
|
2020-02-25 08:08:04 +08:00
|
|
|
|
int exit_code = -1;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
std::string commandLine;
|
|
|
|
|
|
|
|
|
|
std::string clusterFile;
|
2020-02-25 08:08:04 +08:00
|
|
|
|
bool trace = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string traceDir;
|
2019-03-23 01:35:05 +08:00
|
|
|
|
std::string traceFormat;
|
2020-02-25 08:08:04 +08:00
|
|
|
|
int exit_timeout = 0;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
Optional<std::string> exec;
|
2020-02-25 08:08:04 +08:00
|
|
|
|
bool initialStatusCheck = true;
|
|
|
|
|
bool cliHints = true;
|
2020-03-14 06:46:03 +08:00
|
|
|
|
bool debugTLS = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
std::string tlsCertPath;
|
|
|
|
|
std::string tlsKeyPath;
|
|
|
|
|
std::string tlsVerifyPeers;
|
2018-05-09 07:28:13 +08:00
|
|
|
|
std::string tlsCAPath;
|
2018-05-09 11:46:31 +08:00
|
|
|
|
std::string tlsPassword;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2020-03-05 03:15:32 +08:00
|
|
|
|
std::vector<std::pair<std::string, std::string>> knobs;
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
CLIOptions(int argc, char* argv[]) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
program_name = argv[0];
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (int a = 0; a < argc; a++) {
|
|
|
|
|
if (a)
|
|
|
|
|
commandLine += ' ';
|
2017-05-26 04:48:44 +08:00
|
|
|
|
commandLine += argv[a];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
CSimpleOpt args(argc, argv, g_rgOptions);
|
|
|
|
|
|
|
|
|
|
while (args.Next()) {
|
|
|
|
|
int ec = processArg(args);
|
|
|
|
|
if (ec != -1) {
|
|
|
|
|
exit_code = ec;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (exit_timeout && !exec.present()) {
|
|
|
|
|
fprintf(stderr, "ERROR: --timeout may only be specified with --exec\n");
|
2020-03-05 03:15:32 +08:00
|
|
|
|
exit_code = FDB_EXIT_ERROR;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
return;
|
|
|
|
|
}
|
2020-03-05 03:15:32 +08:00
|
|
|
|
|
2021-06-10 13:33:00 +08:00
|
|
|
|
auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
|
2021-06-03 01:04:46 +08:00
|
|
|
|
for (const auto& [knobName, knobValueString] : knobs) {
|
2020-03-05 03:15:32 +08:00
|
|
|
|
try {
|
2021-06-10 13:33:00 +08:00
|
|
|
|
auto knobValue = g_knobs.parseKnobValue(knobName, knobValueString);
|
|
|
|
|
g_knobs.setKnob(knobName, knobValue);
|
2020-03-05 03:15:32 +08:00
|
|
|
|
} catch (Error& e) {
|
|
|
|
|
if (e.code() == error_code_invalid_option_value) {
|
2021-06-03 01:04:46 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"WARNING: Invalid value '%s' for knob option '%s'\n",
|
|
|
|
|
knobValueString.c_str(),
|
|
|
|
|
knobName.c_str());
|
2020-12-27 13:46:20 +08:00
|
|
|
|
TraceEvent(SevWarnAlways, "InvalidKnobValue")
|
2021-06-03 01:04:46 +08:00
|
|
|
|
.detail("Knob", printable(knobName))
|
|
|
|
|
.detail("Value", printable(knobValueString));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2021-06-03 01:04:46 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", knobName.c_str(), e.what());
|
2020-12-27 13:46:20 +08:00
|
|
|
|
TraceEvent(SevError, "FailedToSetKnob")
|
2021-06-03 01:04:46 +08:00
|
|
|
|
.detail("Knob", printable(knobName))
|
|
|
|
|
.detail("Value", printable(knobValueString))
|
2020-12-27 13:46:20 +08:00
|
|
|
|
.error(e);
|
2020-03-05 03:15:32 +08:00
|
|
|
|
exit_code = FDB_EXIT_ERROR;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-04-02 04:59:06 +08:00
|
|
|
|
|
|
|
|
|
// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
|
2021-07-17 15:11:40 +08:00
|
|
|
|
g_knobs.initialize(Randomize::False, IsSimulated::False);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int processArg(CSimpleOpt& args) {
|
|
|
|
|
if (args.LastError() != SO_SUCCESS) {
|
|
|
|
|
printProgramUsage(program_name.c_str());
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch (args.OptionId()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
case OPT_CONNFILE:
|
|
|
|
|
clusterFile = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case OPT_TRACE:
|
|
|
|
|
trace = true;
|
|
|
|
|
break;
|
|
|
|
|
case OPT_TRACE_DIR:
|
|
|
|
|
traceDir = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case OPT_TIMEOUT: {
|
|
|
|
|
char* endptr;
|
|
|
|
|
exit_timeout = strtoul((char*)args.OptionArg(), &endptr, 10);
|
|
|
|
|
if (*endptr != '\0') {
|
|
|
|
|
fprintf(stderr, "ERROR: invalid timeout %s\n", args.OptionArg());
|
|
|
|
|
return 1;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case OPT_EXEC:
|
|
|
|
|
exec = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case OPT_NO_STATUS:
|
|
|
|
|
initialStatusCheck = false;
|
|
|
|
|
break;
|
|
|
|
|
case OPT_NO_HINTS:
|
|
|
|
|
cliHints = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2018-06-27 03:08:32 +08:00
|
|
|
|
#ifndef TLS_DISABLED
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// TLS Options
|
|
|
|
|
case TLSConfig::OPT_TLS_PLUGIN:
|
|
|
|
|
args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case TLSConfig::OPT_TLS_CERTIFICATES:
|
|
|
|
|
tlsCertPath = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case TLSConfig::OPT_TLS_CA_FILE:
|
|
|
|
|
tlsCAPath = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case TLSConfig::OPT_TLS_KEY:
|
|
|
|
|
tlsKeyPath = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case TLSConfig::OPT_TLS_PASSWORD:
|
|
|
|
|
tlsPassword = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case TLSConfig::OPT_TLS_VERIFY_PEERS:
|
|
|
|
|
tlsVerifyPeers = args.OptionArg();
|
|
|
|
|
break;
|
2018-06-21 00:21:23 +08:00
|
|
|
|
#endif
|
2021-03-11 02:06:03 +08:00
|
|
|
|
case OPT_HELP:
|
|
|
|
|
printProgramUsage(program_name.c_str());
|
|
|
|
|
return 0;
|
|
|
|
|
case OPT_STATUS_FROM_JSON:
|
|
|
|
|
return printStatusFromJSON(args.OptionArg());
|
|
|
|
|
case OPT_TRACE_FORMAT:
|
|
|
|
|
if (!validateTraceFormat(args.OptionArg())) {
|
|
|
|
|
fprintf(stderr, "WARNING: Unrecognized trace format `%s'\n", args.OptionArg());
|
2020-03-05 03:15:32 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
traceFormat = args.OptionArg();
|
|
|
|
|
break;
|
|
|
|
|
case OPT_KNOB: {
|
|
|
|
|
std::string syn = args.OptionSyntax();
|
|
|
|
|
if (!StringRef(syn).startsWith(LiteralStringRef("--knob_"))) {
|
|
|
|
|
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", syn.c_str());
|
|
|
|
|
return FDB_EXIT_ERROR;
|
|
|
|
|
}
|
|
|
|
|
syn = syn.substr(7);
|
2021-05-11 07:32:02 +08:00
|
|
|
|
knobs.emplace_back(syn, args.OptionArg());
|
2021-03-11 02:06:03 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case OPT_DEBUG_TLS:
|
|
|
|
|
debugTLS = true;
|
|
|
|
|
break;
|
|
|
|
|
case OPT_VERSION:
|
|
|
|
|
printVersion();
|
|
|
|
|
return FDB_EXIT_SUCCESS;
|
|
|
|
|
case OPT_BUILD_FLAGS:
|
|
|
|
|
printBuildInformation();
|
|
|
|
|
return FDB_EXIT_SUCCESS;
|
2020-03-14 06:46:03 +08:00
|
|
|
|
}
|
|
|
|
|
return -1;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Waits on "what" and then stops the client network thread, whether the
// wrapped future succeeded or threw; the result (or error) is passed through
// to the caller unchanged.
ACTOR template <class T>
Future<T> stopNetworkAfter(Future<T> what) {
	try {
		T t = wait(what);
		API->stopNetwork();
		return t;
	} catch (...) {
		// Still shut the network down on error before re-throwing.
		API->stopNetwork();
		throw;
	}
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Probes the worker described by "kv" (a \xff\xff/worker_interfaces/ entry)
// and, if its leader-registration endpoint answers within
// CLI_CONNECT_TIMEOUT, records the worker under its ip:port key(s) in
// *address_interface. "connectLock" bounds the number of concurrent probes.
ACTOR Future<Void> addInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                Reference<FlowLock> connectLock,
                                KeyValue kv) {
	wait(connectLock->take());
	state FlowLock::Releaser releaser(*connectLock);
	state ClientWorkerInterface workerInterf =
	    BinaryReader::fromStringRef<ClientWorkerInterface>(kv.value, IncludeVersion());
	state ClientLeaderRegInterface leaderInterf(workerInterf.address());
	choose {
		// The worker responded: index it by its primary address.
		when(Optional<LeaderInfo> rep =
		         wait(brokenPromiseToNever(leaderInterf.getLeader.getReply(GetLeaderRequest())))) {
			// Strip the key prefix and any ":tls" suffix to get a bare ip:port.
			StringRef ip_port =
			    (kv.key.endsWith(LiteralStringRef(":tls")) ? kv.key.removeSuffix(LiteralStringRef(":tls")) : kv.key)
			        .removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
			(*address_interface)[ip_port] = std::make_pair(kv.value, leaderInterf);

			// Also index the worker by its secondary address, if it has one.
			if (workerInterf.reboot.getEndpoint().addresses.secondaryAddress.present()) {
				Key full_ip_port2 =
				    StringRef(workerInterf.reboot.getEndpoint().addresses.secondaryAddress.get().toString());
				StringRef ip_port2 = full_ip_port2.endsWith(LiteralStringRef(":tls"))
				                         ? full_ip_port2.removeSuffix(LiteralStringRef(":tls"))
				                         : full_ip_port2;
				(*address_interface)[ip_port2] = std::make_pair(kv.value, leaderInterf);
			}
		}
		// Timed out: silently skip this worker.
		when(wait(delay(CLIENT_KNOBS->CLI_CONNECT_TIMEOUT))) {}
	}
	return Void();
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|
|
|
|
state LineNoise& linenoise = *plinenoise;
|
|
|
|
|
state bool intrans = false;
|
|
|
|
|
|
|
|
|
|
state Database db;
|
|
|
|
|
state Reference<ReadYourWritesTransaction> tr;
|
2021-04-23 16:32:30 +08:00
|
|
|
|
// TODO: refactoring work, will replace db, tr when we have all commands through the general fdb interface
|
2021-03-25 00:33:20 +08:00
|
|
|
|
state Reference<IDatabase> db2;
|
2021-04-23 16:32:30 +08:00
|
|
|
|
state Reference<ITransaction> tr2;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
state bool writeMode = false;
|
|
|
|
|
|
|
|
|
|
state std::string clusterConnectString;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
state std::map<Key, std::pair<Value, ClientLeaderRegInterface>> address_interface;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
state FdbOptions globalOptions;
|
|
|
|
|
state FdbOptions activeOptions;
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
state FdbOptions* options = &globalOptions;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2017-05-27 05:51:34 +08:00
|
|
|
|
state Reference<ClusterConnectionFile> ccf;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
state std::pair<std::string, bool> resolvedClusterFile =
|
|
|
|
|
ClusterConnectionFile::lookupClusterFileName(opt.clusterFile);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
try {
|
2020-11-07 15:50:55 +08:00
|
|
|
|
ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
} catch (Error& e) {
|
|
|
|
|
fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Ordinarily, this is done when the network is run. However, network thread should be set before TraceEvents are
|
|
|
|
|
// logged. This thread will eventually run the network, so call it now.
|
2017-05-26 04:48:44 +08:00
|
|
|
|
TraceEvent::setNetworkThread();
|
|
|
|
|
|
|
|
|
|
try {
|
2021-07-17 15:11:40 +08:00
|
|
|
|
db = Database::createDatabase(ccf, -1, IsInternal::False);
|
2018-09-22 06:58:14 +08:00
|
|
|
|
if (!opt.exec.present()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("Using cluster file `%s'.\n", ccf->getFilename().c_str());
|
2018-09-22 06:58:14 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} catch (Error& e) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-14 04:42:19 +08:00
|
|
|
|
// Note: refactoring work, will remove the above code finally
|
2021-03-25 00:33:20 +08:00
|
|
|
|
try {
|
|
|
|
|
db2 = API->createDatabase(opt.clusterFile.c_str());
|
|
|
|
|
} catch (Error& e) {
|
2021-04-23 16:38:25 +08:00
|
|
|
|
fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code());
|
|
|
|
|
printf("Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
|
2021-03-25 00:33:20 +08:00
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (opt.trace) {
|
|
|
|
|
TraceEvent("CLIProgramStart")
|
2021-03-11 02:06:03 +08:00
|
|
|
|
.setMaxEventLength(12000)
|
|
|
|
|
.detail("SourceVersion", getSourceVersion())
|
|
|
|
|
.detail("Version", FDB_VT_VERSION)
|
|
|
|
|
.detail("PackageName", FDB_VT_PACKAGE_NAME)
|
|
|
|
|
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
|
|
|
|
|
.detail("ClusterFile", ccf->getFilename().c_str())
|
|
|
|
|
.detail("ConnectionString", ccf->getConnectionString().toString())
|
|
|
|
|
.setMaxFieldLength(10000)
|
|
|
|
|
.detail("CommandLine", opt.commandLine)
|
|
|
|
|
.trackLatest("ProgramStart");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
2018-09-22 06:58:14 +08:00
|
|
|
|
if (!opt.exec.present()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (opt.initialStatusCheck) {
|
2020-01-23 07:41:22 +08:00
|
|
|
|
Future<Void> checkStatusF = checkStatus(Void(), db);
|
2019-08-22 05:44:15 +08:00
|
|
|
|
wait(makeInterruptable(success(checkStatusF)));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2018-09-22 06:58:14 +08:00
|
|
|
|
printf("\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
printf("Welcome to the fdbcli. For help, type `help'.\n");
|
|
|
|
|
validOptions = options->getValidOptions();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
state bool is_error = false;
|
|
|
|
|
|
|
|
|
|
state Future<Void> warn;
|
|
|
|
|
loop {
|
|
|
|
|
if (warn.isValid())
|
|
|
|
|
warn.cancel();
|
|
|
|
|
|
|
|
|
|
state std::string line;
|
|
|
|
|
|
|
|
|
|
if (opt.exec.present()) {
|
|
|
|
|
line = opt.exec.get();
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
Optional<std::string> rawline = wait(linenoise.read("fdb> "));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (!rawline.present()) {
|
|
|
|
|
printf("\n");
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
line = rawline.get();
|
|
|
|
|
|
|
|
|
|
if (!line.size())
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
// Don't put dangerous commands in the command history
|
2020-04-02 08:39:16 +08:00
|
|
|
|
if (line.find("writemode") == std::string::npos && line.find("expensive_data_check") == std::string::npos &&
|
|
|
|
|
line.find("unlock") == std::string::npos)
|
2017-05-26 04:48:44 +08:00
|
|
|
|
linenoise.historyAdd(line);
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-23 07:41:22 +08:00
|
|
|
|
warn = checkStatus(timeWarning(5.0, "\nWARNING: Long delay (Ctrl-C to interrupt)\n"), db);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
try {
|
2019-05-11 05:01:52 +08:00
|
|
|
|
state UID randomID = deterministicRandom()->randomUniqueID();
|
2019-04-04 09:01:47 +08:00
|
|
|
|
TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2017-08-22 04:42:01 +08:00
|
|
|
|
bool malformed, partial;
|
|
|
|
|
state std::vector<std::vector<StringRef>> parsed = parseLine(line, malformed, partial);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (malformed)
|
|
|
|
|
LogCommand(line, randomID, "ERROR: malformed escape sequence");
|
|
|
|
|
if (partial)
|
|
|
|
|
LogCommand(line, randomID, "ERROR: unterminated quote");
|
2017-08-22 04:42:01 +08:00
|
|
|
|
if (malformed || partial) {
|
|
|
|
|
if (parsed.size() > 0) {
|
|
|
|
|
// Denote via a special token that the command was a parse failure.
|
|
|
|
|
auto& last_command = parsed.back();
|
2021-03-11 02:06:03 +08:00
|
|
|
|
last_command.insert(last_command.begin(),
|
|
|
|
|
StringRef((const uint8_t*)"parse_error", strlen("parse_error")));
|
2017-08-22 04:42:01 +08:00
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
state bool multi = parsed.size() > 1;
|
2017-08-22 04:42:01 +08:00
|
|
|
|
is_error = false;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
state std::vector<std::vector<StringRef>>::iterator iter;
|
|
|
|
|
for (iter = parsed.begin(); iter != parsed.end(); ++iter) {
|
|
|
|
|
state std::vector<StringRef> tokens = *iter;
|
|
|
|
|
|
2017-08-22 04:42:01 +08:00
|
|
|
|
if (is_error) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("WARNING: the previous command failed, the remaining commands will not be executed.\n");
|
2017-08-22 04:42:01 +08:00
|
|
|
|
break;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!tokens.size())
|
|
|
|
|
continue;
|
|
|
|
|
|
2017-08-22 04:42:01 +08:00
|
|
|
|
if (tokencmp(tokens[0], "parse_error")) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Command failed to completely parse.\n");
|
2017-08-22 04:42:01 +08:00
|
|
|
|
if (tokens.size() > 1) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Not running partial or malformed command:");
|
2017-08-22 04:42:01 +08:00
|
|
|
|
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t)
|
|
|
|
|
printf(" %s", formatStringRef(*t, true).c_str());
|
|
|
|
|
printf("\n");
|
|
|
|
|
}
|
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (multi) {
|
|
|
|
|
printf(">>>");
|
|
|
|
|
for (auto t = tokens.begin(); t != tokens.end(); ++t)
|
|
|
|
|
printf(" %s", formatStringRef(*t, true).c_str());
|
|
|
|
|
printf("\n");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!helpMap.count(tokens[0].toString()) && !hiddenCommands.count(tokens[0].toString())) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Unknown command `%s'. Try `help'?\n", formatStringRef(tokens[0]).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "exit") || tokencmp(tokens[0], "quit")) {
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "help")) {
|
|
|
|
|
if (tokens.size() == 1) {
|
|
|
|
|
printHelpOverview();
|
|
|
|
|
} else if (tokens.size() == 2) {
|
|
|
|
|
if (tokencmp(tokens[1], "escaping"))
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("\n"
|
|
|
|
|
"When parsing commands, fdbcli considers a space to delimit individual tokens.\n"
|
|
|
|
|
"To include a space in a single token, you may either enclose the token in\n"
|
|
|
|
|
"quotation marks (\"hello world\"), prefix the space with a backslash\n"
|
|
|
|
|
"(hello\\ world), or encode the space as a hex byte (hello\\x20world).\n"
|
|
|
|
|
"\n"
|
|
|
|
|
"To include a literal quotation mark in a token, precede it with a backslash\n"
|
|
|
|
|
"(\\\"hello\\ world\\\").\n"
|
|
|
|
|
"\n"
|
|
|
|
|
"To express a binary value, encode each byte as a two-digit hex byte, preceded\n"
|
|
|
|
|
"by \\x (e.g. \\x20 for a space character, or \\x0a\\x00\\x00\\x00 for a\n"
|
|
|
|
|
"32-bit, little-endian representation of the integer 10).\n"
|
|
|
|
|
"\n"
|
|
|
|
|
"All keys and values are displayed by the fdbcli with non-printable characters\n"
|
|
|
|
|
"and spaces encoded as two-digit hex bytes.\n\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
else if (tokencmp(tokens[1], "options")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("\n"
|
|
|
|
|
"The following options are available to be set using the `option' command:\n"
|
|
|
|
|
"\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
options->printHelpString();
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokencmp(tokens[1], "help"))
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printHelpOverview();
|
|
|
|
|
else
|
|
|
|
|
printHelp(tokens[1]);
|
|
|
|
|
} else
|
|
|
|
|
printf("Usage: help [topic]\n");
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "waitconnected")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(makeInterruptable(db->onConnected()));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokencmp(tokens[0], "waitopen")) {
|
|
|
|
|
wait(success(getTransaction(db, tr, options, intrans)->getReadVersion()));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokencmp(tokens[0], "sleep")) {
|
|
|
|
|
if (tokens.size() != 2) {
|
2019-08-17 09:13:35 +08:00
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
double v;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
int n = 0;
|
2019-08-17 09:13:35 +08:00
|
|
|
|
if (sscanf(tokens[1].toString().c_str(), "%lf%n", &v, &n) != 1 || n != tokens[1].size()) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
wait(delay(v));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "status")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Warn at 7 seconds since status will spend as long as 5 seconds trying to read/write from the
|
|
|
|
|
// database
|
|
|
|
|
warn = timeWarning(7.0, "\nWARNING: Long delay (Ctrl-C to interrupt)\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
state StatusClient::StatusLevel level;
|
|
|
|
|
if (tokens.size() == 1)
|
|
|
|
|
level = StatusClient::NORMAL;
|
|
|
|
|
else if (tokens.size() == 2 && tokencmp(tokens[1], "details"))
|
|
|
|
|
level = StatusClient::DETAILED;
|
|
|
|
|
else if (tokens.size() == 2 && tokencmp(tokens[1], "minimal"))
|
|
|
|
|
level = StatusClient::MINIMAL;
|
|
|
|
|
else if (tokens.size() == 2 && tokencmp(tokens[1], "json"))
|
|
|
|
|
level = StatusClient::JSON;
|
|
|
|
|
else {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-23 07:41:22 +08:00
|
|
|
|
StatusObject s = wait(makeInterruptable(StatusClient::statusFetcher(db)));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!opt.exec.present())
|
|
|
|
|
printf("\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printStatus(s, level);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!opt.exec.present())
|
|
|
|
|
printf("\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2020-12-04 06:49:23 +08:00
|
|
|
|
if (tokencmp(tokens[0], "triggerddteaminfolog")) {
|
2020-11-13 08:27:55 +08:00
|
|
|
|
wait(triggerDDTeamInfoLog(db));
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-08 03:54:24 +08:00
|
|
|
|
if (tokencmp(tokens[0], "tssq")) {
|
|
|
|
|
if (tokens.size() == 2) {
|
|
|
|
|
if (tokens[1] != LiteralStringRef("list")) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
wait(tssQuarantineList(db));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (tokens.size() == 3) {
|
|
|
|
|
if ((tokens[1] != LiteralStringRef("start") && tokens[1] != LiteralStringRef("stop")) ||
|
|
|
|
|
(tokens[2].size() != 32) || !std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
bool enable = tokens[1] == LiteralStringRef("start");
|
|
|
|
|
UID tssId = UID::fromString(tokens[2].toString());
|
|
|
|
|
bool err = wait(tssQuarantine(db, enable, tssId));
|
|
|
|
|
if (err)
|
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "configure")) {
|
2019-04-23 06:48:47 +08:00
|
|
|
|
bool err = wait(configure(db, tokens, db->getConnectionFile(), &linenoise, warn));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (err)
|
|
|
|
|
is_error = true;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-17 08:34:59 +08:00
|
|
|
|
if (tokencmp(tokens[0], "fileconfigure")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokens.size() == 2 || (tokens.size() == 3 && (tokens[1] == LiteralStringRef("new") ||
|
|
|
|
|
tokens[1] == LiteralStringRef("FORCE")))) {
|
|
|
|
|
bool err = wait(fileConfigure(db,
|
|
|
|
|
tokens.back().toString(),
|
|
|
|
|
tokens[1] == LiteralStringRef("new"),
|
|
|
|
|
tokens[1] == LiteralStringRef("FORCE")));
|
|
|
|
|
if (err)
|
|
|
|
|
is_error = true;
|
2018-09-05 13:16:35 +08:00
|
|
|
|
} else {
|
2018-08-17 08:34:59 +08:00
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "coordinators")) {
|
2019-04-23 06:48:47 +08:00
|
|
|
|
auto cs = ClusterConnectionFile(db->getConnectionFile()->getFilename()).getConnectionString();
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokens.size() < 2) {
|
|
|
|
|
printf("Cluster description: %s\n", cs.clusterKeyName().toString().c_str());
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("Cluster coordinators (%zu): %s\n",
|
|
|
|
|
cs.coordinators().size(),
|
|
|
|
|
describe(cs.coordinators()).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("Type `help coordinators' to learn how to change this information.\n");
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
bool err = wait(coordinators(db, tokens, cs.coordinators()[0].isTLS()));
|
|
|
|
|
if (err)
|
|
|
|
|
is_error = true;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "exclude")) {
|
2019-04-23 06:48:47 +08:00
|
|
|
|
bool err = wait(exclude(db, tokens, db->getConnectionFile(), warn));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (err)
|
|
|
|
|
is_error = true;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "include")) {
|
|
|
|
|
if (tokens.size() < 2) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
bool err = wait(include(db, tokens));
|
|
|
|
|
if (err)
|
|
|
|
|
is_error = true;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-28 07:40:33 +08:00
|
|
|
|
if (tokencmp(tokens[0], "snapshot")) {
|
2021-05-21 05:01:50 +08:00
|
|
|
|
bool _result = wait(snapshotCommandActor(db2, tokens));
|
|
|
|
|
if (!_result)
|
2019-02-28 07:40:33 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-28 04:15:30 +08:00
|
|
|
|
if (tokencmp(tokens[0], "lock")) {
|
|
|
|
|
if (tokens.size() != 1) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
state UID lockUID = deterministicRandom()->randomUniqueID();
|
|
|
|
|
printf("Locking database with lockUID: %s\n", lockUID.toString().c_str());
|
|
|
|
|
wait(makeInterruptable(lockDatabase(db, lockUID)));
|
|
|
|
|
printf("Database locked.\n");
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-02 08:39:16 +08:00
|
|
|
|
if (tokencmp(tokens[0], "unlock")) {
|
|
|
|
|
if ((tokens.size() != 2) || (tokens[1].size() != 32) ||
|
|
|
|
|
!std::all_of(tokens[1].begin(), tokens[1].end(), &isxdigit)) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
state std::string passPhrase = deterministicRandom()->randomAlphaNumeric(10);
|
|
|
|
|
warn.cancel(); // don't warn while waiting on user input
|
|
|
|
|
printf("Unlocking the database is a potentially dangerous operation.\n");
|
2021-06-17 08:26:43 +08:00
|
|
|
|
printf("%s\n", passPhrase.c_str());
|
|
|
|
|
fflush(stdout);
|
2021-06-17 08:35:44 +08:00
|
|
|
|
Optional<std::string> input =
|
|
|
|
|
wait(linenoise.read(format("Repeat the above passphrase if you would like to proceed:")));
|
2020-04-02 08:39:16 +08:00
|
|
|
|
warn = checkStatus(timeWarning(5.0, "\nWARNING: Long delay (Ctrl-C to interrupt)\n"), db);
|
|
|
|
|
if (input.present() && input.get() == passPhrase) {
|
|
|
|
|
UID unlockUID = UID::fromString(tokens[1].toString());
|
2020-04-09 07:38:30 +08:00
|
|
|
|
try {
|
|
|
|
|
wait(makeInterruptable(unlockDatabase(db, unlockUID)));
|
|
|
|
|
printf("Database unlocked.\n");
|
|
|
|
|
} catch (Error& e) {
|
|
|
|
|
if (e.code() == error_code_database_locked) {
|
|
|
|
|
printf(
|
|
|
|
|
"Unable to unlock database. Make sure to unlock with the correct lock UID.\n");
|
|
|
|
|
}
|
|
|
|
|
throw e;
|
|
|
|
|
}
|
2020-04-02 08:39:16 +08:00
|
|
|
|
} else {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Incorrect passphrase entered.\n");
|
2020-04-02 08:39:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "setclass")) {
|
2021-07-09 06:00:05 +08:00
|
|
|
|
bool _result = wait(makeInterruptable(setClassCommandActor(db2, tokens)));
|
|
|
|
|
if (!_result)
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "begin")) {
|
|
|
|
|
if (tokens.size() != 1) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else if (intrans) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Already in transaction\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
activeOptions = FdbOptions(globalOptions);
|
|
|
|
|
options = &activeOptions;
|
|
|
|
|
getTransaction(db, tr, options, false);
|
|
|
|
|
intrans = true;
|
|
|
|
|
printf("Transaction started\n");
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "commit")) {
|
|
|
|
|
if (tokens.size() != 1) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else if (!intrans) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: No active transaction\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(commitTransaction(tr));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
intrans = false;
|
|
|
|
|
options = &globalOptions;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "reset")) {
|
|
|
|
|
if (tokens.size() != 1) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else if (!intrans) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: No active transaction\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
tr->reset();
|
|
|
|
|
activeOptions = FdbOptions(globalOptions);
|
|
|
|
|
options = &activeOptions;
|
|
|
|
|
options->apply(tr);
|
|
|
|
|
printf("Transaction reset\n");
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "rollback")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokens.size() != 1) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else if (!intrans) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: No active transaction\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
intrans = false;
|
|
|
|
|
options = &globalOptions;
|
|
|
|
|
printf("Transaction rolled back\n");
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "get")) {
|
|
|
|
|
if (tokens.size() != 2) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
Optional<Standalone<StringRef>> v =
|
|
|
|
|
wait(makeInterruptable(getTransaction(db, tr, options, intrans)->get(tokens[1])));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
if (v.present())
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("`%s' is `%s'\n", printable(tokens[1]).c_str(), printable(v.get()).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
else
|
|
|
|
|
printf("`%s': not found\n", printable(tokens[1]).c_str());
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2020-03-31 08:10:00 +08:00
|
|
|
|
if (tokencmp(tokens[0], "getversion")) {
|
|
|
|
|
if (tokens.size() != 1) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
Version v = wait(makeInterruptable(getTransaction(db, tr, options, intrans)->getReadVersion()));
|
|
|
|
|
printf("%ld\n", v);
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-16 11:01:01 +08:00
|
|
|
|
if (tokencmp(tokens[0], "advanceversion")) {
|
2021-05-18 15:22:17 +08:00
|
|
|
|
bool _result = wait(makeInterruptable(advanceVersionCommandActor(db2, tokens)));
|
|
|
|
|
if (!_result)
|
2020-04-16 11:01:01 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "kill")) {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
|
|
|
|
if (tokens.size() == 1) {
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(
|
2020-05-07 00:45:21 +08:00
|
|
|
|
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
|
|
|
|
|
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
|
|
|
|
CLIENT_KNOBS->TOO_MANY)));
|
|
|
|
|
ASSERT(!kvs.more);
|
2020-11-07 15:50:55 +08:00
|
|
|
|
auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
|
2020-01-04 08:10:44 +08:00
|
|
|
|
std::vector<Future<Void>> addInterfs;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : kvs) {
|
2020-01-04 08:10:44 +08:00
|
|
|
|
addInterfs.push_back(addInterface(&address_interface, connectLock, it));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(waitForAll(addInterfs));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (address_interface.size() == 0) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("\nNo addresses can be killed.\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (address_interface.size() == 1) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("\nThe following address can be killed:\n");
|
|
|
|
|
} else {
|
2018-03-15 09:07:05 +08:00
|
|
|
|
printf("\nThe following %zu addresses can be killed:\n", address_interface.size());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : address_interface) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("%s\n", printable(it.first).c_str());
|
|
|
|
|
}
|
|
|
|
|
printf("\n");
|
|
|
|
|
} else if (tokencmp(tokens[1], "all")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : address_interface) {
|
2021-06-18 03:13:07 +08:00
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(it.second.first, IncludeVersion())
|
|
|
|
|
.reboot.send(RebootRequest());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
if (address_interface.size() == 0) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: no processes to kill. You must run the `kill’ command before "
|
|
|
|
|
"running `kill all’.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
} else {
|
2018-03-15 09:07:05 +08:00
|
|
|
|
printf("Attempted to kill %zu processes\n", address_interface.size());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (int i = 1; i < tokens.size(); i++) {
|
|
|
|
|
if (!address_interface.count(tokens[i])) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!is_error) {
|
|
|
|
|
for (int i = 1; i < tokens.size(); i++) {
|
2021-06-18 03:13:07 +08:00
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(address_interface[tokens[i]].first,
|
|
|
|
|
IncludeVersion())
|
|
|
|
|
.reboot.send(RebootRequest());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2018-03-15 09:07:05 +08:00
|
|
|
|
printf("Attempted to kill %zu processes\n", tokens.size() - 1);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-23 07:37:00 +08:00
|
|
|
|
if (tokencmp(tokens[0], "suspend")) {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
|
|
|
|
if (tokens.size() == 1) {
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(
|
2020-08-01 16:59:48 +08:00
|
|
|
|
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
|
|
|
|
|
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
|
|
|
|
CLIENT_KNOBS->TOO_MANY)));
|
|
|
|
|
ASSERT(!kvs.more);
|
2020-11-07 15:50:55 +08:00
|
|
|
|
auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
|
2020-07-23 07:37:00 +08:00
|
|
|
|
std::vector<Future<Void>> addInterfs;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : kvs) {
|
2020-07-23 07:37:00 +08:00
|
|
|
|
addInterfs.push_back(addInterface(&address_interface, connectLock, it));
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(waitForAll(addInterfs));
|
|
|
|
|
if (address_interface.size() == 0) {
|
2020-07-24 02:09:59 +08:00
|
|
|
|
printf("\nNo addresses can be suspended.\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (address_interface.size() == 1) {
|
2020-07-24 02:09:59 +08:00
|
|
|
|
printf("\nThe following address can be suspended:\n");
|
2020-07-23 07:37:00 +08:00
|
|
|
|
} else {
|
2020-07-24 02:09:59 +08:00
|
|
|
|
printf("\nThe following %zu addresses can be suspended:\n", address_interface.size());
|
2020-07-23 07:37:00 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : address_interface) {
|
2020-07-23 07:37:00 +08:00
|
|
|
|
printf("%s\n", printable(it.first).c_str());
|
|
|
|
|
}
|
|
|
|
|
printf("\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokens.size() == 2) {
|
2020-07-23 07:37:00 +08:00
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (int i = 2; i < tokens.size(); i++) {
|
|
|
|
|
if (!address_interface.count(tokens[i])) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
|
2020-07-23 07:37:00 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!is_error) {
|
2020-07-23 07:37:00 +08:00
|
|
|
|
double seconds;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
int n = 0;
|
2020-07-23 07:37:00 +08:00
|
|
|
|
auto secondsStr = tokens[1].toString();
|
|
|
|
|
if (sscanf(secondsStr.c_str(), "%lf%n", &seconds, &n) != 1 || n != secondsStr.size()) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
int64_t timeout_ms = seconds * 1000;
|
|
|
|
|
tr->setOption(FDBTransactionOptions::TIMEOUT,
|
|
|
|
|
StringRef((uint8_t*)&timeout_ms, sizeof(int64_t)));
|
|
|
|
|
for (int i = 2; i < tokens.size(); i++) {
|
2021-06-18 03:13:07 +08:00
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(
|
|
|
|
|
address_interface[tokens[i]].first, IncludeVersion())
|
|
|
|
|
.reboot.send(RebootRequest(false, false, seconds));
|
2020-07-23 07:37:00 +08:00
|
|
|
|
}
|
|
|
|
|
printf("Attempted to suspend %zu processes\n", tokens.size() - 2);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-01 21:39:04 +08:00
|
|
|
|
if (tokencmp(tokens[0], "force_recovery_with_data_loss")) {
|
2021-05-21 03:47:00 +08:00
|
|
|
|
bool _result = wait(makeInterruptable(forceRecoveryWithDataLossCommandActor(db2, tokens)));
|
2021-05-21 05:01:50 +08:00
|
|
|
|
if (!_result)
|
2018-07-01 21:39:04 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-02 08:55:13 +08:00
|
|
|
|
if (tokencmp(tokens[0], "maintenance")) {
|
2021-05-21 02:37:03 +08:00
|
|
|
|
bool _result = wait(makeInterruptable(maintenanceCommandActor(db2, tokens)));
|
2021-05-21 05:01:50 +08:00
|
|
|
|
if (!_result)
|
2019-04-02 08:55:13 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-21 12:38:45 +08:00
|
|
|
|
if (tokencmp(tokens[0], "consistencycheck")) {
|
2021-04-23 16:32:30 +08:00
|
|
|
|
getTransaction(db, tr, tr2, options, intrans);
|
2021-05-21 07:29:22 +08:00
|
|
|
|
bool _result = wait(makeInterruptable(consistencyCheckCommandActor(tr2, tokens)));
|
2021-05-21 05:01:50 +08:00
|
|
|
|
if (!_result)
|
|
|
|
|
is_error = true;
|
2019-06-21 12:38:45 +08:00
|
|
|
|
continue;
|
2019-04-02 08:55:13 +08:00
|
|
|
|
}
|
|
|
|
|
|
2017-09-30 12:14:08 +08:00
|
|
|
|
if (tokencmp(tokens[0], "profile")) {
|
|
|
|
|
if (tokens.size() == 1) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Usage: profile <client|list|flow|heap>\n");
|
2017-10-04 11:57:39 +08:00
|
|
|
|
is_error = true;
|
2017-09-30 12:14:08 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
if (tokencmp(tokens[1], "client")) {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
2017-10-04 11:57:39 +08:00
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
2017-09-30 12:14:08 +08:00
|
|
|
|
if (tokens.size() == 2) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Usage: profile client <get|set>\n");
|
2017-09-30 12:14:08 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-07-28 06:45:35 +08:00
|
|
|
|
wait(makeInterruptable(GlobalConfig::globalConfig().onInitialized()));
|
2017-10-04 11:57:39 +08:00
|
|
|
|
if (tokencmp(tokens[2], "get")) {
|
2017-09-30 12:14:08 +08:00
|
|
|
|
if (tokens.size() != 3) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Addtional arguments to `get` are not supported.\n");
|
2017-09-30 12:14:08 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-03-17 08:20:25 +08:00
|
|
|
|
const double sampleRateDbl = GlobalConfig::globalConfig().get<double>(
|
|
|
|
|
fdbClientInfoTxnSampleRate, std::numeric_limits<double>::infinity());
|
|
|
|
|
const int64_t sizeLimit =
|
|
|
|
|
GlobalConfig::globalConfig().get<int64_t>(fdbClientInfoTxnSizeLimit, -1);
|
2017-10-04 11:57:39 +08:00
|
|
|
|
std::string sampleRateStr = "default", sizeLimitStr = "default";
|
2021-02-24 08:17:05 +08:00
|
|
|
|
if (!std::isinf(sampleRateDbl)) {
|
|
|
|
|
sampleRateStr = boost::lexical_cast<std::string>(sampleRateDbl);
|
2017-10-04 11:57:39 +08:00
|
|
|
|
}
|
2021-02-24 08:17:05 +08:00
|
|
|
|
if (sizeLimit != -1) {
|
|
|
|
|
sizeLimitStr = boost::lexical_cast<std::string>(sizeLimit);
|
2017-09-30 12:14:08 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("Client profiling rate is set to %s and size limit is set to %s.\n",
|
|
|
|
|
sampleRateStr.c_str(),
|
|
|
|
|
sizeLimitStr.c_str());
|
2017-09-30 12:14:08 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
2017-10-04 11:57:39 +08:00
|
|
|
|
if (tokencmp(tokens[2], "set")) {
|
|
|
|
|
if (tokens.size() != 5) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Usage: profile client set <RATE|default> <SIZE|default>\n");
|
2017-09-30 12:14:08 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
double sampleRate;
|
2017-10-04 11:57:39 +08:00
|
|
|
|
if (tokencmp(tokens[3], "default")) {
|
|
|
|
|
sampleRate = std::numeric_limits<double>::infinity();
|
|
|
|
|
} else {
|
2017-10-05 05:00:38 +08:00
|
|
|
|
char* end;
|
|
|
|
|
sampleRate = std::strtod((const char*)tokens[3].begin(), &end);
|
|
|
|
|
if (!std::isspace(*end)) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: %s failed to parse.\n", printable(tokens[3]).c_str());
|
2017-10-05 05:00:38 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2017-10-04 11:57:39 +08:00
|
|
|
|
}
|
|
|
|
|
int64_t sizeLimit;
|
|
|
|
|
if (tokencmp(tokens[4], "default")) {
|
|
|
|
|
sizeLimit = -1;
|
|
|
|
|
} else {
|
2017-09-30 12:14:08 +08:00
|
|
|
|
Optional<uint64_t> parsed = parse_with_suffix(tokens[4].toString());
|
|
|
|
|
if (parsed.present()) {
|
|
|
|
|
sizeLimit = parsed.get();
|
|
|
|
|
} else {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: `%s` failed to parse.\n", printable(tokens[4]).c_str());
|
2017-09-30 12:14:08 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-02-24 08:17:05 +08:00
|
|
|
|
|
|
|
|
|
Tuple rate = Tuple().appendDouble(sampleRate);
|
|
|
|
|
Tuple size = Tuple().append(sizeLimit);
|
|
|
|
|
tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
|
2021-03-12 02:57:46 +08:00
|
|
|
|
tr->set(GlobalConfig::prefixedKey(fdbClientInfoTxnSampleRate), rate.pack());
|
|
|
|
|
tr->set(GlobalConfig::prefixedKey(fdbClientInfoTxnSizeLimit), size.pack());
|
2017-09-30 12:14:08 +08:00
|
|
|
|
if (!intrans) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(commitTransaction(tr));
|
2017-09-30 12:14:08 +08:00
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Unknown action: %s\n", printable(tokens[2]).c_str());
|
2017-09-30 12:14:08 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2017-10-12 05:13:16 +08:00
|
|
|
|
if (tokencmp(tokens[1], "list")) {
|
|
|
|
|
if (tokens.size() != 2) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Usage: profile list\n");
|
2017-10-12 05:13:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(
|
2020-05-07 00:45:21 +08:00
|
|
|
|
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
|
|
|
|
|
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
|
|
|
|
CLIENT_KNOBS->TOO_MANY)));
|
|
|
|
|
ASSERT(!kvs.more);
|
2017-10-12 05:13:16 +08:00
|
|
|
|
for (const auto& pair : kvs) {
|
2020-05-07 00:45:21 +08:00
|
|
|
|
auto ip_port = (pair.key.endsWith(LiteralStringRef(":tls"))
|
|
|
|
|
? pair.key.removeSuffix(LiteralStringRef(":tls"))
|
|
|
|
|
: pair.key)
|
|
|
|
|
.removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
|
2018-06-12 07:43:28 +08:00
|
|
|
|
printf("%s\n", printable(ip_port).c_str());
|
2017-10-12 05:13:16 +08:00
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
if (tokencmp(tokens[1], "flow")) {
|
|
|
|
|
if (tokens.size() == 2) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Usage: profile flow <run>\n");
|
2017-10-12 05:13:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
if (tokencmp(tokens[2], "run")) {
|
|
|
|
|
if (tokens.size() < 6) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(
|
|
|
|
|
stderr,
|
|
|
|
|
"ERROR: Usage: profile flow run <DURATION_IN_SECONDS> <FILENAME> <PROCESS...>\n");
|
2017-10-12 05:13:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(makeInterruptable(
|
2020-05-07 00:45:21 +08:00
|
|
|
|
tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
|
|
|
|
|
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
|
|
|
|
CLIENT_KNOBS->TOO_MANY)));
|
|
|
|
|
ASSERT(!kvs.more);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
char* duration_end;
|
2017-10-12 05:13:16 +08:00
|
|
|
|
int duration = std::strtol((const char*)tokens[3].begin(), &duration_end, 10);
|
|
|
|
|
if (!std::isspace(*duration_end)) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(
|
|
|
|
|
stderr, "ERROR: Failed to parse %s as an integer.", printable(tokens[3]).c_str());
|
2017-10-12 05:13:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
std::map<Key, ClientWorkerInterface> interfaces;
|
2017-10-17 07:46:52 +08:00
|
|
|
|
state std::vector<Key> all_profiler_addresses;
|
|
|
|
|
state std::vector<Future<ErrorOr<Void>>> all_profiler_responses;
|
2017-10-12 05:13:16 +08:00
|
|
|
|
for (const auto& pair : kvs) {
|
2020-05-07 00:45:21 +08:00
|
|
|
|
auto ip_port = (pair.key.endsWith(LiteralStringRef(":tls"))
|
|
|
|
|
? pair.key.removeSuffix(LiteralStringRef(":tls"))
|
|
|
|
|
: pair.key)
|
|
|
|
|
.removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
interfaces.emplace(
|
|
|
|
|
ip_port,
|
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(pair.value, IncludeVersion()));
|
2017-10-12 05:13:16 +08:00
|
|
|
|
}
|
|
|
|
|
if (tokens.size() == 6 && tokencmp(tokens[5], "all")) {
|
|
|
|
|
for (const auto& pair : interfaces) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ProfilerRequest profileRequest(
|
|
|
|
|
ProfilerRequest::Type::FLOW, ProfilerRequest::Action::RUN, duration);
|
2017-10-12 05:13:16 +08:00
|
|
|
|
profileRequest.outputFile = tokens[4];
|
2017-10-17 07:46:52 +08:00
|
|
|
|
all_profiler_addresses.push_back(pair.first);
|
|
|
|
|
all_profiler_responses.push_back(pair.second.profiler.tryGetReply(profileRequest));
|
2017-10-12 05:13:16 +08:00
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for (int tokenidx = 5; tokenidx < tokens.size(); tokenidx++) {
|
|
|
|
|
auto element = interfaces.find(tokens[tokenidx]);
|
|
|
|
|
if (element == interfaces.end()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: process '%s' not recognized.\n",
|
|
|
|
|
printable(tokens[tokenidx]).c_str());
|
2017-10-12 05:13:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!is_error) {
|
|
|
|
|
for (int tokenidx = 5; tokenidx < tokens.size(); tokenidx++) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ProfilerRequest profileRequest(
|
|
|
|
|
ProfilerRequest::Type::FLOW, ProfilerRequest::Action::RUN, duration);
|
2017-10-12 05:13:16 +08:00
|
|
|
|
profileRequest.outputFile = tokens[4];
|
2017-10-17 07:46:52 +08:00
|
|
|
|
all_profiler_addresses.push_back(tokens[tokenidx]);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
all_profiler_responses.push_back(
|
|
|
|
|
interfaces[tokens[tokenidx]].profiler.tryGetReply(profileRequest));
|
2017-10-12 05:13:16 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!is_error) {
|
2018-08-11 04:57:10 +08:00
|
|
|
|
wait(waitForAll(all_profiler_responses));
|
2017-10-17 07:46:52 +08:00
|
|
|
|
for (int i = 0; i < all_profiler_responses.size(); i++) {
|
|
|
|
|
const ErrorOr<Void>& err = all_profiler_responses[i].get();
|
2017-10-12 05:13:16 +08:00
|
|
|
|
if (err.isError()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: %s: %s: %s\n",
|
|
|
|
|
printable(all_profiler_addresses[i]).c_str(),
|
|
|
|
|
err.getError().name(),
|
|
|
|
|
err.getError().what());
|
2017-10-12 05:13:16 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-10-17 07:46:52 +08:00
|
|
|
|
all_profiler_addresses.clear();
|
|
|
|
|
all_profiler_responses.clear();
|
2017-10-12 05:13:16 +08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-04-04 06:57:16 +08:00
|
|
|
|
if (tokencmp(tokens[1], "heap")) {
|
|
|
|
|
if (tokens.size() != 3) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Usage: profile heap <PROCESS>\n");
|
2019-04-04 06:57:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(
|
2020-08-01 16:59:48 +08:00
|
|
|
|
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
|
|
|
|
|
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
|
|
|
|
CLIENT_KNOBS->TOO_MANY)));
|
|
|
|
|
ASSERT(!kvs.more);
|
2019-04-04 06:57:16 +08:00
|
|
|
|
std::map<Key, ClientWorkerInterface> interfaces;
|
|
|
|
|
for (const auto& pair : kvs) {
|
2020-08-01 16:59:48 +08:00
|
|
|
|
auto ip_port = (pair.key.endsWith(LiteralStringRef(":tls"))
|
|
|
|
|
? pair.key.removeSuffix(LiteralStringRef(":tls"))
|
|
|
|
|
: pair.key)
|
|
|
|
|
.removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
|
2021-03-11 02:06:03 +08:00
|
|
|
|
interfaces.emplace(
|
|
|
|
|
ip_port,
|
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(pair.value, IncludeVersion()));
|
2019-04-04 06:57:16 +08:00
|
|
|
|
}
|
|
|
|
|
state Key ip_port = tokens[2];
|
|
|
|
|
if (interfaces.find(ip_port) == interfaces.end()) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: host %s not found\n", printable(ip_port).c_str());
|
2019-04-04 06:57:16 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
ProfilerRequest profileRequest(
|
|
|
|
|
ProfilerRequest::Type::GPROF_HEAP, ProfilerRequest::Action::RUN, 0);
|
2019-04-04 06:57:16 +08:00
|
|
|
|
profileRequest.outputFile = LiteralStringRef("heapz");
|
|
|
|
|
ErrorOr<Void> response = wait(interfaces[ip_port].profiler.tryGetReply(profileRequest));
|
|
|
|
|
if (response.isError()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: %s: %s: %s\n",
|
|
|
|
|
printable(ip_port).c_str(),
|
|
|
|
|
response.getError().name(),
|
|
|
|
|
response.getError().what());
|
2019-04-04 06:57:16 +08:00
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Unknown type: %s\n", printable(tokens[1]).c_str());
|
2017-09-30 12:14:08 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "expensive_data_check")) {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
|
|
|
|
if (tokens.size() == 1) {
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(
|
2020-08-01 16:59:48 +08:00
|
|
|
|
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
|
|
|
|
|
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
|
|
|
|
CLIENT_KNOBS->TOO_MANY)));
|
|
|
|
|
ASSERT(!kvs.more);
|
2020-11-07 15:50:55 +08:00
|
|
|
|
auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
|
2020-01-04 08:10:44 +08:00
|
|
|
|
std::vector<Future<Void>> addInterfs;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : kvs) {
|
2020-01-04 08:10:44 +08:00
|
|
|
|
addInterfs.push_back(addInterface(&address_interface, connectLock, it));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(waitForAll(addInterfs));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (address_interface.size() == 0) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("\nNo addresses can be checked.\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (address_interface.size() == 1) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("\nThe following address can be checked:\n");
|
|
|
|
|
} else {
|
2018-03-15 09:07:05 +08:00
|
|
|
|
printf("\nThe following %zu addresses can be checked:\n", address_interface.size());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : address_interface) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("%s\n", printable(it.first).c_str());
|
|
|
|
|
}
|
|
|
|
|
printf("\n");
|
|
|
|
|
} else if (tokencmp(tokens[1], "all")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (auto it : address_interface) {
|
2021-06-18 03:13:07 +08:00
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(it.second.first, IncludeVersion())
|
|
|
|
|
.reboot.send(RebootRequest(false, true));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
if (address_interface.size() == 0) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: no processes to check. You must run the `expensive_data_check’ "
|
|
|
|
|
"command before running `expensive_data_check all’.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
} else {
|
2018-03-15 09:07:05 +08:00
|
|
|
|
printf("Attempted to kill and check %zu processes\n", address_interface.size());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
for (int i = 1; i < tokens.size(); i++) {
|
|
|
|
|
if (!address_interface.count(tokens[i])) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!is_error) {
|
|
|
|
|
for (int i = 1; i < tokens.size(); i++) {
|
2021-06-18 03:13:07 +08:00
|
|
|
|
BinaryReader::fromStringRef<ClientWorkerInterface>(address_interface[tokens[i]].first,
|
|
|
|
|
IncludeVersion())
|
|
|
|
|
.reboot.send(RebootRequest(false, true));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
2018-03-15 09:07:05 +08:00
|
|
|
|
printf("Attempted to kill and check %zu processes\n", tokens.size() - 1);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokencmp(tokens[0], "getrange") ||
|
|
|
|
|
tokencmp(tokens[0], "getrangekeys")) { // FIXME: support byte limits, and reverse range reads
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokens.size() < 2 || tokens.size() > 4) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
state int limit;
|
|
|
|
|
bool valid = true;
|
|
|
|
|
|
|
|
|
|
if (tokens.size() == 4) {
|
|
|
|
|
// INT_MAX is 10 digits; rather than
|
|
|
|
|
// worrying about overflow we'll just cap
|
|
|
|
|
// limit at the (already absurd)
|
|
|
|
|
// nearly-a-billion
|
|
|
|
|
if (tokens[3].size() > 9) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: bad limit\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
limit = 0;
|
|
|
|
|
int place = 1;
|
|
|
|
|
for (int i = tokens[3].size(); i > 0; i--) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
int val = int(tokens[3][i - 1]) - int('0');
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (val < 0 || val > 9) {
|
|
|
|
|
valid = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
limit += val * place;
|
|
|
|
|
place *= 10;
|
|
|
|
|
}
|
|
|
|
|
if (!valid) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: bad limit\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
limit = 25;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Standalone<StringRef> endKey;
|
|
|
|
|
if (tokens.size() >= 3) {
|
|
|
|
|
endKey = tokens[2];
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokens[1].size() == 0) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
endKey = normalKeys.end;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokens[1] == systemKeys.begin) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
endKey = systemKeys.end;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokens[1] >= allKeys.end) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
throw key_outside_legal_range();
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
endKey = strinc(tokens[1]);
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-04 04:14:16 +08:00
|
|
|
|
RangeResult kvs = wait(makeInterruptable(
|
2021-03-11 02:06:03 +08:00
|
|
|
|
getTransaction(db, tr, options, intrans)->getRange(KeyRangeRef(tokens[1], endKey), limit)));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
printf("\nRange limited to %d keys\n", limit);
|
|
|
|
|
for (auto iter = kvs.begin(); iter < kvs.end(); iter++) {
|
|
|
|
|
if (tokencmp(tokens[0], "getrangekeys"))
|
|
|
|
|
printf("`%s'\n", printable((*iter).key).c_str());
|
|
|
|
|
else
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf(
|
|
|
|
|
"`%s' is `%s'\n", printable((*iter).key).c_str(), printable((*iter).value).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
printf("\n");
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "writemode")) {
|
|
|
|
|
if (tokens.size() != 2) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokencmp(tokens[1], "on")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
writeMode = true;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokencmp(tokens[1], "off")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
writeMode = false;
|
|
|
|
|
} else {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "set")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!writeMode) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: writemode must be enabled to set or clear keys in the database.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokens.size() != 3) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
|
|
|
|
tr->set(tokens[1], tokens[2]);
|
|
|
|
|
|
|
|
|
|
if (!intrans) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(commitTransaction(tr));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "clear")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!writeMode) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: writemode must be enabled to set or clear keys in the database.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokens.size() != 2) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
|
|
|
|
tr->clear(tokens[1]);
|
|
|
|
|
|
|
|
|
|
if (!intrans) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(commitTransaction(tr));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tokencmp(tokens[0], "clearrange")) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (!writeMode) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: writemode must be enabled to set or clear keys in the database.\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2017-05-27 05:51:34 +08:00
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokens.size() != 3) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
|
|
|
|
getTransaction(db, tr, options, intrans);
|
|
|
|
|
tr->clear(KeyRangeRef(tokens[1], tokens[2]));
|
|
|
|
|
|
|
|
|
|
if (!intrans) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
wait(commitTransaction(tr));
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-07-29 09:12:04 +08:00
|
|
|
|
if (tokencmp(tokens[0], "datadistribution")) {
|
2019-07-12 05:53:00 +08:00
|
|
|
|
if (tokens.size() != 2 && tokens.size() != 3) {
|
2019-08-22 05:44:15 +08:00
|
|
|
|
printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
|
2019-07-25 06:32:52 +08:00
|
|
|
|
"<ssfailure|rebalance>>\n");
|
2017-07-29 09:12:04 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2019-08-22 05:44:15 +08:00
|
|
|
|
if (tokencmp(tokens[1], "on")) {
|
2019-02-13 08:07:17 +08:00
|
|
|
|
wait(success(setDDMode(db, 1)));
|
2019-07-12 05:53:00 +08:00
|
|
|
|
printf("Data distribution is turned on.\n");
|
|
|
|
|
} else if (tokencmp(tokens[1], "off")) {
|
2019-02-13 08:07:17 +08:00
|
|
|
|
wait(success(setDDMode(db, 0)));
|
2019-07-12 05:53:00 +08:00
|
|
|
|
printf("Data distribution is turned off.\n");
|
2019-07-10 07:09:51 +08:00
|
|
|
|
} else if (tokencmp(tokens[1], "disable")) {
|
|
|
|
|
if (tokencmp(tokens[2], "ssfailure")) {
|
2019-09-26 14:19:42 +08:00
|
|
|
|
wait(success(makeInterruptable(setHealthyZone(db, ignoreSSFailuresZoneString, 0))));
|
2019-07-12 05:53:00 +08:00
|
|
|
|
printf("Data distribution is disabled for storage server failures.\n");
|
2019-07-10 07:09:51 +08:00
|
|
|
|
} else if (tokencmp(tokens[2], "rebalance")) {
|
2019-07-12 05:53:00 +08:00
|
|
|
|
wait(makeInterruptable(setDDIgnoreRebalanceSwitch(db, true)));
|
|
|
|
|
printf("Data distribution is disabled for rebalance.\n");
|
2019-07-10 07:09:51 +08:00
|
|
|
|
} else {
|
2019-08-22 05:44:15 +08:00
|
|
|
|
printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
|
2019-07-25 06:32:52 +08:00
|
|
|
|
"<ssfailure|rebalance>>\n");
|
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
} else if (tokencmp(tokens[1], "enable")) {
|
|
|
|
|
if (tokencmp(tokens[2], "ssfailure")) {
|
2019-09-26 14:19:42 +08:00
|
|
|
|
wait(success(makeInterruptable(clearHealthyZone(db, false, true))));
|
2019-07-25 06:32:52 +08:00
|
|
|
|
printf("Data distribution is enabled for storage server failures.\n");
|
|
|
|
|
} else if (tokencmp(tokens[2], "rebalance")) {
|
|
|
|
|
wait(makeInterruptable(setDDIgnoreRebalanceSwitch(db, false)));
|
|
|
|
|
printf("Data distribution is enabled for rebalance.\n");
|
|
|
|
|
} else {
|
2019-08-22 05:44:15 +08:00
|
|
|
|
printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
|
2019-07-25 06:32:52 +08:00
|
|
|
|
"<ssfailure|rebalance>>\n");
|
2019-07-10 07:09:51 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
2017-07-29 09:12:04 +08:00
|
|
|
|
} else {
|
2019-08-22 05:44:15 +08:00
|
|
|
|
printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
|
2019-07-25 06:32:52 +08:00
|
|
|
|
"<ssfailure|rebalance>>\n");
|
2017-07-29 09:12:04 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-26 04:48:44 +08:00
|
|
|
|
if (tokencmp(tokens[0], "option")) {
|
|
|
|
|
if (tokens.size() == 2 || tokens.size() > 4) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
} else {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokens.size() == 1) {
|
|
|
|
|
if (options->hasAnyOptionsEnabled()) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
printf("\nCurrently enabled options:\n\n");
|
|
|
|
|
options->print();
|
|
|
|
|
printf("\n");
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "There are no options enabled\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
bool isOn;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokencmp(tokens[1], "on")) {
|
2017-05-26 04:48:44 +08:00
|
|
|
|
isOn = true;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else if (tokencmp(tokens[1], "off")) {
|
|
|
|
|
if (intrans) {
|
|
|
|
|
fprintf(
|
|
|
|
|
stderr,
|
|
|
|
|
"ERROR: Cannot turn option off when using a transaction created with `begin'\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokens.size() > 3) {
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Cannot specify option argument when turning option off\n");
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
isOn = false;
|
2021-03-11 02:06:03 +08:00
|
|
|
|
} else {
|
|
|
|
|
fprintf(stderr,
|
|
|
|
|
"ERROR: Invalid option state `%s': option must be turned `on' or `off'\n",
|
|
|
|
|
formatStringRef(tokens[1]).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Optional<StringRef> arg = (tokens.size() > 3) ? tokens[3] : Optional<StringRef>();
|
|
|
|
|
|
|
|
|
|
try {
|
|
|
|
|
options->setOption(tr, tokens[2], isOn, arg, intrans);
|
2021-03-11 02:06:03 +08:00
|
|
|
|
printf("Option %s for %s\n",
|
|
|
|
|
isOn ? "enabled" : "disabled",
|
|
|
|
|
intrans ? "current transaction" : "all transactions");
|
|
|
|
|
} catch (Error& e) {
|
|
|
|
|
// options->setOption() prints error message
|
2019-03-19 06:03:43 +08:00
|
|
|
|
TraceEvent(SevWarn, "CLISetOptionError").error(e).detail("Option", tokens[2]);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (tokencmp(tokens[0], "throttle")) {
|
2021-07-28 01:58:11 +08:00
|
|
|
|
bool _result = wait(throttleCommandActor(db2, tokens));
|
|
|
|
|
if (!_result)
|
2020-04-04 06:24:14 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-07-28 01:58:11 +08:00
|
|
|
|
|
2020-08-07 00:23:31 +08:00
|
|
|
|
if (tokencmp(tokens[0], "cache_range")) {
|
|
|
|
|
if (tokens.size() != 4) {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
KeyRangeRef cacheRange(tokens[2], tokens[3]);
|
|
|
|
|
if (tokencmp(tokens[1], "set")) {
|
|
|
|
|
wait(makeInterruptable(addCachedRange(db, cacheRange)));
|
|
|
|
|
} else if (tokencmp(tokens[1], "clear")) {
|
|
|
|
|
wait(makeInterruptable(removeCachedRange(db, cacheRange)));
|
|
|
|
|
} else {
|
|
|
|
|
printUsage(tokens[0]);
|
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: Unknown command `%s'. Try `help'?\n", formatStringRef(tokens[0]).c_str());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-04 09:01:55 +08:00
|
|
|
|
TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("IsError", is_error);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
|
|
} catch (Error& e) {
|
2021-03-11 02:06:03 +08:00
|
|
|
|
if (e.code() != error_code_actor_cancelled)
|
2021-02-25 19:09:02 +08:00
|
|
|
|
fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code());
|
2017-05-26 04:48:44 +08:00
|
|
|
|
is_error = true;
|
2018-09-26 06:06:19 +08:00
|
|
|
|
if (intrans) {
|
2018-09-22 06:58:14 +08:00
|
|
|
|
printf("Rolling back current transaction\n");
|
|
|
|
|
intrans = false;
|
|
|
|
|
options = &globalOptions;
|
|
|
|
|
options->apply(tr);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (opt.exec.present()) {
|
|
|
|
|
return is_error ? 1 : 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Top-level interactive driver: constructs the line editor (with completion and
// inline hints), loads/saves command history, and delegates command processing
// to cli(). Returns the process exit code produced by cli().
ACTOR Future<int> runCli(CLIOptions opt) {
	state LineNoise linenoise(
	    // Tab-completion callback: delegates to the fdbcli command completer.
	    [](std::string const& line, std::vector<std::string>& completions) { fdbcliCompCmd(line, completions); },
	    // Hint callback: shows the remaining parameters of the command being typed.
	    // Captures only the enabled flag by value so the lambda does not dangle on opt.
	    [enabled = opt.cliHints](std::string const& line) -> LineNoise::Hint {
		    if (!enabled) {
			    return LineNoise::Hint();
		    }

		    bool error = false;
		    bool partial = false;
		    // parseLine mutates its input, so parse a copy of the current line.
		    std::string linecopy = line;
		    std::vector<std::vector<StringRef>> parsed = parseLine(linecopy, error, partial);
		    if (parsed.size() == 0 || parsed.back().size() == 0)
			    return LineNoise::Hint();
		    StringRef command = parsed.back().front();
		    // A trailing parse error counts as one in-progress parameter.
		    int finishedParameters = parsed.back().size() + error;

		    // As a user is typing an escaped character, e.g. \", after the \ and before the " is typed
		    // the string will be a parse error. Ignore this parse error to avoid flipping the hint to
		    // {malformed escape sequence} and back to the original hint for the span of one character
		    // being entered.
		    if (error && line.back() != '\\')
			    return LineNoise::Hint(std::string(" {malformed escape sequence}"), 90, false);

		    // If the cursor is mid-token, pad the hint with a space so it doesn't
		    // run into the partially-typed argument.
		    bool inArgument = *(line.end() - 1) != ' ';
		    std::string hintLine = inArgument ? " " : "";
		    if (tokencmp(command, "throttle")) {
			    // throttle has context-sensitive subcommands; use its dedicated hint generator.
			    std::vector<const char*> hintItems = throttleHintGenerator(parsed.back(), inArgument);
			    if (hintItems.empty()) {
				    return LineNoise::Hint();
			    }
			    for (auto item : hintItems) {
				    hintLine = hintLine + item + " ";
			    }
		    } else {
			    // Generic path: hint the not-yet-typed parameters from the command's usage string.
			    auto iter = helpMap.find(command.toString());
			    if (iter != helpMap.end()) {
				    std::string helpLine = iter->second.usage;
				    std::vector<std::vector<StringRef>> parsedHelp = parseLine(helpLine, error, partial);
				    for (int i = finishedParameters; i < parsedHelp.back().size(); i++) {
					    hintLine = hintLine + parsedHelp.back()[i].toString() + " ";
				    }
			    } else {
				    // Unknown command: no hint.
				    return LineNoise::Hint();
			    }
		    }

		    return LineNoise::Hint(hintLine, 90, false);
	    },
	    1000, // maximum history entries kept in memory
	    false);

	// History load failures are non-fatal: warn in the trace log and continue.
	state std::string historyFilename;
	try {
		historyFilename = joinPath(getUserHomeDirectory(), ".fdbcli_history");
		linenoise.historyLoad(historyFilename);
	} catch (Error& e) {
		TraceEvent(SevWarnAlways, "ErrorLoadingCliHistory")
		    .error(e)
		    .detail("Filename", historyFilename.empty() ? "<unknown>" : historyFilename)
		    .GetLastError();
	}

	// Run the actual command loop; its return value is the process exit code.
	state int result = wait(cli(opt, &linenoise));

	// Persist history only if we successfully resolved a filename above.
	if (!historyFilename.empty()) {
		try {
			linenoise.historySave(historyFilename);
		} catch (Error& e) {
			TraceEvent(SevWarnAlways, "ErrorSavingCliHistory")
			    .error(e)
			    .detail("Filename", historyFilename)
			    .GetLastError();
		}
	}

	return result;
}
|
|
|
|
|
|
|
|
|
|
// Fires after `duration` seconds to enforce the --timeout option: prints a
// notice to stderr and completes, which lets the caller tear down the network
// and exit. Raced against the CLI future in main().
ACTOR Future<Void> timeExit(double duration) {
	wait(delay(duration));
	fprintf(stderr, "Specified timeout reached -- exiting...\n");
	return Void();
}
|
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
|
// Process entry point: performs platform/global initialization, applies
// command-line and TLS network options (which must all be set before the
// network is started), then runs the CLI loop on the flow network.
// Returns the CLI's exit code, or 1 on setup failure.
int main(int argc, char** argv) {
	platformInit();
	Error::init();
	std::set_new_handler(&platform::outOfMemory);
	uint64_t memLimit = 8LL << 30; // 8 GiB process memory quota
	setMemoryQuota(memLimit);

	registerCrashHandler();

	// Install the client knob collection before any knob is read.
	IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);

#ifdef __unixish__
	struct sigaction act;

	// We don't want ctrl-c to quit
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	act.sa_handler = SIG_IGN;
	sigaction(SIGINT, &act, nullptr);
#endif

	CLIOptions opt(argc, argv);
	// exit_code != -1 means argument parsing already decided the outcome
	// (e.g. --help or a usage error).
	if (opt.exit_code != -1)
		return opt.exit_code;

	// Tracing options must be applied before the network starts.
	if (opt.trace) {
		if (opt.traceDir.empty())
			setNetworkOption(FDBNetworkOptions::TRACE_ENABLE);
		else
			setNetworkOption(FDBNetworkOptions::TRACE_ENABLE, StringRef(opt.traceDir));

		if (!opt.traceFormat.empty()) {
			setNetworkOption(FDBNetworkOptions::TRACE_FORMAT, StringRef(opt.traceFormat));
		}
		setNetworkOption(FDBNetworkOptions::ENABLE_SLOW_TASK_PROFILING);
	}
	initHelp();

	// deferred TLS options
	// Each TLS option is applied independently; a failure reports the specific
	// path/value that was rejected and aborts with exit code 1.
	if (opt.tlsCertPath.size()) {
		try {
			setNetworkOption(FDBNetworkOptions::TLS_CERT_PATH, opt.tlsCertPath);
		} catch (Error& e) {
			fprintf(stderr, "ERROR: cannot set TLS certificate path to `%s' (%s)\n", opt.tlsCertPath.c_str(), e.what());
			return 1;
		}
	}

	if (opt.tlsCAPath.size()) {
		try {
			setNetworkOption(FDBNetworkOptions::TLS_CA_PATH, opt.tlsCAPath);
		} catch (Error& e) {
			fprintf(stderr, "ERROR: cannot set TLS CA path to `%s' (%s)\n", opt.tlsCAPath.c_str(), e.what());
			return 1;
		}
	}
	if (opt.tlsKeyPath.size()) {
		try {
			// The password must be set before the (possibly encrypted) key path.
			if (opt.tlsPassword.size())
				setNetworkOption(FDBNetworkOptions::TLS_PASSWORD, opt.tlsPassword);

			setNetworkOption(FDBNetworkOptions::TLS_KEY_PATH, opt.tlsKeyPath);
		} catch (Error& e) {
			fprintf(stderr, "ERROR: cannot set TLS key path to `%s' (%s)\n", opt.tlsKeyPath.c_str(), e.what());
			return 1;
		}
	}
	if (opt.tlsVerifyPeers.size()) {
		try {
			setNetworkOption(FDBNetworkOptions::TLS_VERIFY_PEERS, opt.tlsVerifyPeers);
		} catch (Error& e) {
			fprintf(
			    stderr, "ERROR: cannot set TLS peer verification to `%s' (%s)\n", opt.tlsVerifyPeers.c_str(), e.what());
			return 1;
		}
	}

	try {
		setNetworkOption(FDBNetworkOptions::DISABLE_CLIENT_STATISTICS_LOGGING);
	} catch (Error& e) {
		fprintf(stderr, "ERROR: cannot disable logging client related information (%s)\n", e.what());
		return 1;
	}

	// --debug-tls: dump the effective TLS configuration and exit without
	// connecting to a cluster.
	if (opt.debugTLS) {
#ifndef TLS_DISABLED
		// Backdoor into NativeAPI's tlsConfig, which is where the above network option settings ended up.
		extern TLSConfig tlsConfig;
		printf("TLS Configuration:\n");
		printf("\tCertificate Path: %s\n", tlsConfig.getCertificatePathSync().c_str());
		printf("\tKey Path: %s\n", tlsConfig.getKeyPathSync().c_str());
		printf("\tCA Path: %s\n", tlsConfig.getCAPathSync().c_str());
		try {
			LoadedTLSConfig loaded = tlsConfig.loadSync();
			printf("\tPassword: %s\n", loaded.getPassword().empty() ? "Not configured" : "Exists, but redacted");
			printf("\n");
			loaded.print(stdout);
		} catch (Error& e) {
			fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code());
			printf("Use --log and look at the trace logs for more detailed information on the failure.\n");
			return 1;
		}
#else
		printf("This fdbcli was built with TLS disabled.\n");
#endif
		return 0;
	}

	try {
		// Note: refactoring fdbcli, in progress
		API->selectApiVersion(FDB_API_VERSION);
		API->setupNetwork();
		Future<int> cliFuture = runCli(opt);
		// Race the CLI against the optional --timeout watchdog.
		Future<Void> timeoutFuture = opt.exit_timeout ? timeExit(opt.exit_timeout) : Never();
		// Shut the network down when either future completes.
		auto f = stopNetworkAfter(success(cliFuture) || timeoutFuture);
		// Blocks on this thread until the network loop stops.
		API->runNetwork();

		// cliFuture not ready here means the timeout fired first.
		if (cliFuture.isReady()) {
			return cliFuture.get();
		} else {
			return 1;
		}
	} catch (Error& e) {
		fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code());
		return 1;
	}
}
|