Merge branch 'master' of github.com:apple/foundationdb into feature-redwood

# Conflicts:
#	fdbrpc/AsyncFileCached.actor.h
#	fdbserver/IKeyValueStore.h
#	fdbserver/KeyValueStoreMemory.actor.cpp
#	fdbserver/workloads/StatusWorkload.actor.cpp
#	tests/fast/SidebandWithStatus.txt
#	tests/rare/LargeApiCorrectnessStatus.txt
#	tests/slow/DDBalanceAndRemoveStatus.txt
Stephen Atherton 2018-09-20 03:39:55 -07:00
commit 2fc86c5ff3
273 changed files with 6933 additions and 4461 deletions

.gitignore vendored

@ -42,6 +42,7 @@ packaging/msi/FDBInstaller.wix*
.ccache
.deps/
.objs/
.cmds/
bindings/c/fdb_c.symbols
bindings/go/build
bindings/go/godoc
@ -71,6 +72,7 @@ FoundationDB.xcodeproj
foundationdb.VC.db
foundationdb.VC.VC.opendb
ipch/
compile_commands.json
# Temporary and user configuration files
*~


@ -85,12 +85,12 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_cl
throw std::runtime_error("FDBLibTLSServerError");
}
if (tls_configure(tls_sctx, policy->tls_cfg) == -1) {
TraceEvent(SevError, "FDBLibTLSConfigureError", uid).detail("LibTLSErrorMessage", tls_error(tls_ctx));
TraceEvent(SevError, "FDBLibTLSConfigureError", uid).detail("LibTLSErrorMessage", tls_error(tls_sctx));
tls_free(tls_sctx);
throw std::runtime_error("FDBLibTLSConfigureError");
}
if (tls_accept_cbs(tls_sctx, &tls_ctx, tls_read_func, tls_write_func, this) == -1) {
TraceEvent(SevError, "FDBLibTLSAcceptError", uid).detail("LibTLSErrorMessage", tls_error(tls_ctx));
TraceEvent(SevError, "FDBLibTLSAcceptError", uid).detail("LibTLSErrorMessage", tls_error(tls_sctx));
tls_free(tls_sctx);
throw std::runtime_error("FDBLibTLSAcceptError");
}
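Both corrected calls above read the error from tls_sctx, the server context the failing call actually operated on: in libtls, tls_error() reports the last error recorded on the specific context it is given. A minimal sketch of the pattern, assuming LibreSSL's libtls (the helper itself is hypothetical, not code from this commit):

#include <tls.h>
#include <stdexcept>
#include <string>

static void configureServerContext(struct tls* tls_sctx, struct tls_config* cfg) {
	if (tls_configure(tls_sctx, cfg) == -1) {
		// Query the context that failed, not an unrelated one.
		const char* err = tls_error(tls_sctx); // may be NULL if nothing was recorded
		std::string msg = err ? err : "unknown";
		tls_free(tls_sctx);
		throw std::runtime_error("FDBLibTLSConfigureError: " + msg);
	}
}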
@ -369,7 +369,7 @@ int FDBLibTLSSession::handshake() {
case TLS_WANT_POLLOUT:
return WANT_WRITE;
default:
TraceEvent("FDBLibTLSHandshakeError", uid).detail("LibTLSErrorMessage", tls_error(tls_ctx)).suppressFor(1.0, true);
TraceEvent("FDBLibTLSHandshakeError", uid).suppressFor(1.0).detail("LibTLSErrorMessage", tls_error(tls_ctx));
return FAILED;
}
}
@ -389,7 +389,7 @@ int FDBLibTLSSession::read(uint8_t* data, int length) {
return (int)n;
}
if (n == 0) {
TraceEvent("FDBLibTLSReadEOF").suppressFor(1.0, true);
TraceEvent("FDBLibTLSReadEOF").suppressFor(1.0);
return FAILED;
}
if (n == TLS_WANT_POLLIN)
@ -397,7 +397,7 @@ int FDBLibTLSSession::read(uint8_t* data, int length) {
if (n == TLS_WANT_POLLOUT)
return WANT_WRITE;
TraceEvent("FDBLibTLSReadError", uid).detail("LibTLSErrorMessage", tls_error(tls_ctx)).suppressFor(1.0, true);
TraceEvent("FDBLibTLSReadError", uid).suppressFor(1.0).detail("LibTLSErrorMessage", tls_error(tls_ctx));
return FAILED;
}
@ -416,7 +416,7 @@ int FDBLibTLSSession::write(const uint8_t* data, int length) {
return (int)n;
}
if (n == 0) {
TraceEvent("FDBLibTLSWriteEOF", uid).suppressFor(1.0, true);
TraceEvent("FDBLibTLSWriteEOF", uid).suppressFor(1.0);
return FAILED;
}
if (n == TLS_WANT_POLLIN)
@ -424,6 +424,6 @@ int FDBLibTLSSession::write(const uint8_t* data, int length) {
if (n == TLS_WANT_POLLOUT)
return WANT_WRITE;
TraceEvent("FDBLibTLSWriteError", uid).detail("LibTLSErrorMessage", tls_error(tls_ctx)).suppressFor(1.0, true);
TraceEvent("FDBLibTLSWriteError", uid).suppressFor(1.0).detail("LibTLSErrorMessage", tls_error(tls_ctx));
return FAILED;
}
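Each TraceEvent edit in this file follows the same shape: the removed two-argument suppressFor(interval, bool) form becomes the one-argument form, and the call moves ahead of detail() in the chain, so the converged pattern is:

TraceEvent("FDBLibTLSWriteError", uid)
	.suppressFor(1.0)
	.detail("LibTLSErrorMessage", tls_error(tls_ctx));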


@ -108,7 +108,10 @@ STATIC_LIBS :=
VPATH += $(addprefix :,$(filter-out lib,$(patsubst -L%,%,$(filter -L%,$(LDFLAGS)))))
CS_PROJECTS := flow/actorcompiler flow/coveragetool fdbclient/vexillographer
CPP_PROJECTS := flow fdbrpc fdbclient fdbbackup fdbserver fdbcli bindings/c bindings/java fdbmonitor bindings/flow/tester bindings/flow FDBLibTLS
CPP_PROJECTS := flow fdbrpc fdbclient fdbbackup fdbserver fdbcli bindings/c bindings/java fdbmonitor bindings/flow/tester bindings/flow
ifndef TLS_DISABLED
CPP_PROJECTS += FDBLibTLS
endif
OTHER_PROJECTS := bindings/python bindings/ruby bindings/go
CS_MK_GENERATED := $(CS_PROJECTS:=/generated.mk)
@ -143,7 +146,7 @@ else
endif
@echo "#define FDB_VT_PACKAGE_NAME \"$(PACKAGE_NAME)\"" >> $@
bindings: fdb_c fdb_python fdb_ruby fdb_java fdb_flow fdb_flow_tester fdb_go fdb_go_tester
bindings: fdb_c fdb_python fdb_ruby fdb_java fdb_flow fdb_flow_tester fdb_go fdb_go_tester fdb_c_tests
Makefiles: $(MK_GENERATED)
@ -157,6 +160,11 @@ $(CPP_MK_GENERATED): build/vcxprojtom4.py build/vcxproj.mk Makefile
DEPSDIR := .deps
OBJDIR := .objs
CMDDIR := .cmds
COMPILE_COMMANDS_JSONS := $(addprefix $(CMDDIR)/,$(addsuffix /compile_commands.json,${CPP_PROJECTS}))
compile_commands.json: build/concatinate_jsons.py ${COMPILE_COMMANDS_JSONS}
@build/concatinate_jsons.py ${COMPILE_COMMANDS_JSONS}
include $(MK_INCLUDE)
@ -166,6 +174,7 @@ clean: $(CLEAN_TARGETS) docpreview_clean
@rm -rf $(DEPSDIR)
@rm -rf lib/
@rm -rf bin/coverage.*.xml
@rm -rf $(CMDDIR) compile_commands.json
@find . -name "*.g.cpp" -exec rm -f {} \; -or -name "*.g.h" -exec rm -f {} \;
targets:
@ -192,13 +201,16 @@ lib/libstdc++.a: $(shell $(CC) -print-file-name=libstdc++_pic.a)
@rm -r .libstdc++
docpreview: javadoc
TARGETS= $(MAKE) -C documentation docpreview
@echo "Generating docpreview"
@TARGETS= $(MAKE) -C documentation docpreview
docpreview_clean:
CLEAN_TARGETS= $(MAKE) -C documentation docpreview_clean
@echo "Cleaning docpreview"
@CLEAN_TARGETS= $(MAKE) -C documentation -s --no-print-directory docpreview_clean
packages/foundationdb-docs-$(VERSION).tar.gz: FORCE javadoc
TARGETS= $(MAKE) -C documentation docpackage
@echo "Packaging documentation"
@TARGETS= $(MAKE) -C documentation docpackage
@mkdir -p packages
@rm -f packages/foundationdb-docs-$(VERSION).tar.gz
@cp documentation/sphinx/.dist/foundationdb-docs-$(VERSION).tar.gz packages/foundationdb-docs-$(VERSION).tar.gz


@ -114,7 +114,7 @@ class DirectoryTest(Test):
instructions.push_args(layer)
instructions.push_args(*test_util.with_length(path))
instructions.append('DIRECTORY_OPEN')
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False)))
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer=='partition'))))
# print('%d. Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \
# % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1))
@ -378,7 +378,7 @@ class DirectoryTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021]),
ResultSpecification(self.directory_log, ordering_index=0),
ResultSpecification(self.subspace_log, ordering_index=0)
]


@ -315,10 +315,13 @@ void fdb_cluster_destroy( FDBCluster* c ) {
extern "C" DLLEXPORT
FDBFuture* fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
int db_name_length ) {
return (FDBFuture*)
( CLUSTER(c)->createDatabase( StringRef( db_name,
db_name_length ) ).extractPtr() );
int db_name_length )
{
if(strncmp((const char*)db_name, "DB", db_name_length) != 0) {
return (FDBFuture*)ThreadFuture<Reference<IDatabase>>(invalid_database_name()).extractPtr();
}
return (FDBFuture*)CLUSTER(c)->createDatabase().extractPtr();
}
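After this change the C binding accepts only the literal database name "DB"; any other name yields a future carrying an invalid_database_name() error rather than a database handle. A hedged usage sketch against the fdb_c API, assuming a valid FDBCluster* cluster (error handling abbreviated):

FDBFuture* f = fdb_cluster_create_database(cluster, (uint8_t const*)"DB", 2);
if (fdb_future_block_until_ready(f) == 0 && fdb_future_get_error(f) == 0) {
	// success: extract the database handle from f
} // any other name now fails with invalid_database_name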
extern "C" DLLEXPORT


@ -216,7 +216,7 @@ namespace FDB {
throw directory_does_not_exist();
}
Void _ = wait(dirLayer->checkVersion(tr, true));
wait(dirLayer->checkVersion(tr, true));
state Standalone<StringRef> newPrefix = wait(getPrefix(dirLayer, tr, prefix));
bool isFree = wait(isPrefixFree(dirLayer, tr, newPrefix, !prefix.present()));
@ -238,7 +238,7 @@ namespace FDB {
Standalone<StringRef> layer, Optional<Standalone<StringRef>> prefix, bool allowCreate, bool allowOpen)
{
ASSERT(!prefix.present() || allowCreate);
Void _ = wait(dirLayer->checkVersion(tr, false));
wait(dirLayer->checkVersion(tr, false));
if(prefix.present() && !dirLayer->allowManualPrefixes) {
if(!dirLayer->getPath().size()) {
@ -287,7 +287,7 @@ namespace FDB {
}
ACTOR Future<Standalone<VectorRef<StringRef>>> listInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
Void _ = wait(dirLayer->checkVersion(tr, false));
wait(dirLayer->checkVersion(tr, false));
state DirectoryLayer::Node node = wait(find(dirLayer, tr, path));
@ -346,7 +346,7 @@ namespace FDB {
}
ACTOR Future<Reference<DirectorySubspace>> moveInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path oldPath, IDirectory::Path newPath) {
Void _ = wait(dirLayer->checkVersion(tr, true));
wait(dirLayer->checkVersion(tr, true));
if(oldPath.size() <= newPath.size()) {
if(pathsEqual(oldPath, newPath, oldPath.size())) {
@ -386,7 +386,7 @@ namespace FDB {
}
tr->set(parentNode.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(newPath.back(), true).key(), dirLayer->nodeSubspace.unpack(oldNode.subspace.get().key()).getString(0));
Void _ = wait(removeFromParent(dirLayer, tr, oldPath));
wait(removeFromParent(dirLayer, tr, oldPath));
return dirLayer->contentsOfNode(oldNode.subspace.get(), newPath, oldNode.layer);
}
@ -420,7 +420,7 @@ namespace FDB {
}
// waits are done concurrently
Void _ = wait(waitForAll(futures));
wait(waitForAll(futures));
Standalone<StringRef> nodePrefix = dirLayer->nodeSubspace.unpack(nodeSub.key()).getString(0);
@ -432,7 +432,7 @@ namespace FDB {
Future<bool> removeInternal(Reference<DirectoryLayer> const&, Reference<Transaction> const&, IDirectory::Path const&, bool const&);
ACTOR Future<bool> removeInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path, bool failOnNonexistent) {
Void _ = wait(dirLayer->checkVersion(tr, true));
wait(dirLayer->checkVersion(tr, true));
if(path.empty()) {
throw cannot_modify_root_directory();
@ -459,7 +459,7 @@ namespace FDB {
futures.push_back(removeRecursive(dirLayer, tr, node.subspace.get()));
futures.push_back(removeFromParent(dirLayer, tr, path));
Void _ = wait(waitForAll(futures));
wait(waitForAll(futures));
return true;
}
@ -473,7 +473,7 @@ namespace FDB {
}
ACTOR Future<bool> existsInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
Void _ = wait(dirLayer->checkVersion(tr, false));
wait(dirLayer->checkVersion(tr, false));
DirectoryLayer::Node node = wait(find(dirLayer, tr, path));
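The repeated rewrite in this file, and throughout this commit, replaces the old "Void _ = wait(...)" idiom with a bare wait(...) statement: the actor compiler no longer requires binding the dummy Void result when waiting on a Future<Void>. A minimal sketch with a hypothetical actor (not code from this commit):

ACTOR Future<Void> commitTwice(Reference<Transaction> tr) {
	wait(tr->commit()); // bare statement; no unused "Void _" binding needed
	tr->reset();
	wait(tr->commit());
	return Void();
}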


@ -75,7 +75,7 @@ namespace FDB {
tr->set(recent.get(candidate).key(), ValueRef());
// }
Void _ = wait(success(latestCounter) && success(candidateValue));
wait(success(latestCounter) && success(candidateValue));
int64_t currentWindowStart = 0;
if(latestCounter.get().size() > 0) {
currentWindowStart = counters.unpack(latestCounter.get()[0].key).getInt(0);


@ -56,7 +56,7 @@ ACTOR Future<Void> _test() {
for ( i = 0; i < 100000; i++ ) {
Version v = wait( versions[i] );
}
// Void _ = wait( waitForAllReady( versions ) );
// wait( waitForAllReady( versions ) );
printf("Elapsed: %lf\n", timer_monotonic() - starttime );
tr->set( LiteralStringRef("foo"), LiteralStringRef("bar") );
@ -124,7 +124,7 @@ namespace FDB {
Future<Void> onReady = ready.getFuture();
throw_on_error( fdb_future_set_callback( f->f, backToFutureCallback, ready.extractRawPointer() ) );
Void _ = wait( onReady );
wait( onReady );
return convertValue( f );
}


@ -305,7 +305,7 @@ struct DirectoryRemoveFunc : InstructionFunc {
if(count.getInt(0) == 0) {
logOp(format("remove %s", pathToString(directory->getPath()).c_str()));
Void _ = wait(executeMutation(instruction, [this] () {
wait(executeMutation(instruction, [this] () {
return directory->remove(instruction->tr);
}));
}
@ -313,7 +313,7 @@ struct DirectoryRemoveFunc : InstructionFunc {
IDirectory::Path path = wait(popPath(data));
logOp(format("remove %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
Void _ = wait(executeMutation(instruction, [this, path] () {
wait(executeMutation(instruction, [this, path] () {
return directory->remove(instruction->tr, path);
}));
}


@ -327,8 +327,8 @@ struct EmptyStackFunc : InstructionFunc {
static const char* name;
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
//Void _ = wait(printFlowTesterStack(&(data->stack)));
//Void _ = wait(debugPrintRange(instruction->tr, "\x01test_results", ""));
//wait(printFlowTesterStack(&(data->stack)));
//wait(debugPrintRange(instruction->tr, "\x01test_results", ""));
data->stack.clear();
return Void();
}
@ -340,7 +340,7 @@ struct SwapFunc : InstructionFunc {
static const char* name;
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
Void _ = wait(stackSwap(&(data->stack)));
wait(stackSwap(&(data->stack)));
return Void();
}
};
@ -365,7 +365,7 @@ struct SubFunc : InstructionFunc {
static const char* name;
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
Void _ = wait(stackSub(&(data->stack)));
wait(stackSub(&(data->stack)));
return Void();
}
};
@ -376,7 +376,7 @@ struct ConcatFunc : InstructionFunc {
static const char* name;
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
Void _ = wait(stackConcat(&(data->stack)));
wait(stackConcat(&(data->stack)));
return Void();
}
};
@ -399,11 +399,11 @@ struct LogStackFunc : InstructionFunc {
tr->set(pk, pv.substr(0, std::min(pv.size(), 40000)));
}
Void _ = wait(tr->commit());
wait(tr->commit());
return Void();
}
catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -422,11 +422,11 @@ struct LogStackFunc : InstructionFunc {
ASSERT(it.size() == 1);
entries[data->stack.data.size()] = it.front();
if(entries.size() == 100) {
Void _ = wait(logStack(data, entries, prefix));
wait(logStack(data, entries, prefix));
entries.clear();
}
Void _ = wait(logStack(data, entries, prefix));
wait(logStack(data, entries, prefix));
}
return Void();
@ -440,7 +440,7 @@ REGISTER_INSTRUCTION_FUNC(LogStackFunc);
//
ACTOR Future<Standalone<StringRef>> waitForVoid(Future<Void> f) {
try{
Void _ = wait(f);
wait(f);
Tuple t;
t.append(LiteralStringRef("RESULT_NOT_PRESENT"));
return t.pack();
@ -605,7 +605,7 @@ struct SetFunc : InstructionFunc {
data->stack.push(waitForVoid(mutation));
}
else {
Void _ = wait(mutation);
wait(mutation);
}
return Void();
@ -765,7 +765,7 @@ struct ClearFunc : InstructionFunc {
data->stack.push(waitForVoid(mutation));
}
else {
Void _ = wait(mutation);
wait(mutation);
}
return Void();
@ -902,7 +902,7 @@ struct ClearRangeFunc : InstructionFunc {
data->stack.push(waitForVoid(mutation));
}
else {
Void _ = wait(mutation);
wait(mutation);
}
return Void();
@ -933,7 +933,7 @@ struct ClearRangeStartWithFunc : InstructionFunc {
data->stack.push(waitForVoid(mutation));
}
else {
Void _ = wait(mutation);
wait(mutation);
}
return Void();
@ -1346,7 +1346,7 @@ struct WaitEmptyFunc : InstructionFunc {
break;
}
catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -1493,7 +1493,7 @@ struct AtomicOPFunc : InstructionFunc {
data->stack.push(waitForVoid(mutation));
}
else {
Void _ = wait(mutation);
wait(mutation);
}
return Void();
@ -1572,7 +1572,7 @@ ACTOR static Future<Void> getInstructions(Reference<FlowTesterData> data, String
return Void();
}
catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -1605,8 +1605,8 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
// idx, data->instructions.size(), printable(StringRef(data->instructions[idx].key)).c_str(), printable(StringRef(data->instructions[idx].value)).c_str(),
// isDatabase, isSnapshot, data->stack.data.size());
//Void _ = wait(printFlowTesterStack(&(data->stack)));
//Void _ = wait(debugPrintRange(instruction->tr, "\x01test_results", ""));
//wait(printFlowTesterStack(&(data->stack)));
//wait(debugPrintRange(instruction->tr, "\x01test_results", ""));
state Reference<InstructionData> instruction = Reference<InstructionData>(new InstructionData(isDatabase, isSnapshot, data->instructions[idx].value, Reference<Transaction>()));
if (isDatabase) {
@ -1621,7 +1621,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
ASSERT(!isDirectory || !isSnapshot);
data->stack.index = idx;
Void _ = wait(InstructionFunc::call(op.toString(), data, instruction));
wait(InstructionFunc::call(op.toString(), data, instruction));
}
catch (Error& e) {
if(LOG_ERRORS) {
@ -1648,9 +1648,9 @@ ACTOR static Future<Void> runTest(Reference<FlowTesterData> data, Reference<Data
ASSERT(data);
try {
data->db = db;
Void _ = wait(getInstructions(data, prefix));
Void _ = wait(doInstructions(data));
Void _ = wait(waitForAll(data->subThreads));
wait(getInstructions(data, prefix));
wait(doInstructions(data));
wait(waitForAll(data->subThreads));
}
catch (Error& e) {
TraceEvent(SevError, "FlowTesterDataRunError").error(e);
@ -1718,7 +1718,7 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
Reference<DatabaseContext> db = cluster->createDatabase();
Reference<FlowTesterData> data = Reference<FlowTesterData>(new FlowTesterData(fdb));
Void _ = wait(runTest(data, db, prefix));
wait(runTest(data, db, prefix));
// Stopping the network returns from g_network->run() and allows
// the program to terminate
@ -1752,7 +1752,7 @@ ACTOR void _test_versionstamp() {
tr->atomicOp(LiteralStringRef("foo"), LiteralStringRef("blahblahbl\x00\x00\x00\x00"), FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
Void _ = wait(tr->commit()); // should use retry loop
wait(tr->commit()); // should use retry loop
tr->reset();
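The comment above flags a known gap; the conventional flow retry loop used elsewhere in this commit (for example in LogStackFunc above) would wrap the commit like this sketch:

loop {
	try {
		wait(tr->commit());
		break;
	}
	catch(Error &e) {
		wait(tr->onError(e)); // retries retryable errors, rethrows the rest
	}
}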


@ -32,6 +32,7 @@
#include "bindings/flow/IDirectory.h"
#include "bindings/flow/Subspace.h"
#include "bindings/flow/DirectoryLayer.h"
#include "flow/actorcompiler.h" // This must be the last #include.
#define LOG_ALL 0
#define LOG_INSTRUCTIONS LOG_ALL || 0
@ -224,13 +225,13 @@ Future<decltype(fake<F>()().getValue())> executeMutation(Reference<InstructionDa
try {
state decltype(fake<F>()().getValue()) result = wait(func());
if(instruction->isDatabase) {
Void _ = wait(instruction->tr->commit());
wait(instruction->tr->commit());
}
return result;
}
catch(Error &e) {
if(instruction->isDatabase) {
Void _ = wait(instruction->tr->onError(e));
wait(instruction->tr->onError(e));
}
else {
throw;
@ -239,4 +240,5 @@ Future<decltype(fake<F>()().getValue())> executeMutation(Reference<InstructionDa
}
}
#include "flow/unactorcompiler.h"
#endif
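Together with the "flow/actorcompiler.h" include added at the top of this header, the new "flow/unactorcompiler.h" include brackets the ACTOR code. A skeletal sketch of the discipline this pair enforces (an assumption inferred from the comments in this commit):

#include "flow/flow.h" // ordinary includes first
#include "flow/actorcompiler.h" // This must be the last #include.

// ... ACTOR functions and wait() statements ...

#include "flow/unactorcompiler.h" // undoes the actor-compiler macros at the end of the header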


@ -6,24 +6,25 @@ fdb-go
This package requires:
- Go 1.1+ with CGO enabled
- [Mono](http://www.mono-project.com/) (macOS or Windows) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://apple.github.io/foundationdb/downloads.html#c))
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x-6.0.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-600.
To build this package, in the top level of this repository run:
To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater):
./fdb-go-install.sh install --fdbver <x.y.z>
The "install" command of this script does not depend on the presence of the repo in general and will download the repository into
your local go path. Running "localinstall" instead of "install" will use the local copy here (with a symlink) instead
of downloading from the remote repository.
You can also build this package; in the top level of this repository, run:
make fdb_go
This will create binary packages for the appropriate platform within the "build" subdirectory of this folder.
To install this package, you can run the "fdb-go-install.sh" script:
./fdb-go-install.sh install
The "install" command of this script does not depend on the presence of the repo in general and will download the repository into
your local go path. Running "localinstall" instead of "install" will use the local copy here (with a symlink) instead
of downloading from the remote repository.
Documentation
-------------


@ -12,7 +12,7 @@
#
DESTDIR="${DESTDIR:-}"
FDBVER="${FDBVER:-5.1.0}"
FDBVER="${FDBVER:-}"
REMOTE="${REMOTE:-github.com}"
FDBREPO="${FDBREPO:-apple/foundationdb}"
@ -68,10 +68,11 @@ function printUsage() {
echo " help Print this help message and then quit"
echo
echo "Command Line Options:"
echo " --fdbver <version> FoundationDB semantic version (default is ${FDBVER})"
echo " --fdbver <version> FoundationDB semantic version to install or download (required if FDBVER environment variable is not set)"
echo " -d/--dest-dir <dest> Local location for the repo (default is to place in go path)"
echo
echo "Environment Variable Options:"
echo " FDBVER Default FoundationDB semantic version to use if --fdbver flag is not set"
echo " REMOTE Remote repository to download from (currently ${REMOTE})"
echo " FDBREPO Repository of FoundationDB library to download (currently ${FDBREPO})"
echo " FDBLIBDIR Directory within which should be the FoundationDB c library (currently ${FDBLIBDIR})"
@ -126,6 +127,12 @@ function parseArgs() {
shift
done
if [[ -z "${FDBVER}" ]] ; then
echo "No FoundationDB version specified!"
echo "Please supply a version by setting the --fdbver flag or the FDBVER environment variable."
let status="${status} + 1"
fi
return "${status}"
}
@ -210,18 +217,18 @@ else
if [[ -d "${fdbdir}" ]] ; then
echo "Directory ${fdbdir} already exists ; checking out appropriate tag"
cmd1=( 'git' '-C' "${fdbdir}" 'fetch' 'origin' )
cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "release-${FDBVER}" )
cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "${FDBVER}" )
if ! echo "${cmd1[*]}" || ! "${cmd1[@]}" ; then
let status="${status} + 1"
echo "Could not pull latest changes from origin"
elif ! echo "${cmd2[*]}" || ! "${cmd2[@]}" ; then
let status="${status} + 1"
echo "Could not checkout tag release-${FDBVER}."
echo "Could not checkout tag ${FDBVER}."
fi
else
echo "Downloading foundation repository into ${destdir}:"
cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "release-${FDBVER}" "https://${REMOTE}/${FDBREPO}.git" )
cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "${FDBVER}" "https://${REMOTE}/${FDBREPO}.git" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then


@ -30,8 +30,6 @@ import (
"log"
"os"
"strings"
"unicode"
"unicode/utf8"
)
type Option struct {
@ -114,23 +112,14 @@ func translateName(old string) string {
return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
}
func lowerFirst(s string) string {
if s == "" {
return ""
}
r, n := utf8.DecodeRuneInString(s)
return string(unicode.ToLower(r)) + s[n:]
}
func writeMutation(opt Option) {
desc := lowerFirst(opt.Description)
tname := translateName(opt.Name)
fmt.Printf(`
// %s %s
// %s
func (t Transaction) %s(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, %d)
}
`, tname, desc, tname, opt.Code)
`, opt.Description, tname, opt.Code)
}
func writeEnum(scope Scope, opt Option, delta int) {
@ -207,7 +196,7 @@ func int64ToBytes(i int64) ([]byte, error) {
receiver := scope.Name + "s"
for _, opt := range scope.Option {
if opt.Description != "Deprecated" && !opt.Hidden { // Eww
if !opt.Hidden {
writeOpt(receiver, opt)
}
}
@ -216,7 +205,7 @@ func int64ToBytes(i int64) ([]byte, error) {
if scope.Name == "MutationType" {
for _, opt := range scope.Option {
if opt.Description != "Deprecated" && !opt.Hidden { // Eww
if !opt.Hidden {
writeMutation(opt)
}
}


@ -192,12 +192,17 @@ var apiVersion int
var networkStarted bool
var networkMutex sync.Mutex
type DatabaseId struct {
clusterFile string
dbName string
}
var openClusters map[string]Cluster
var openDatabases map[string]Database
var openDatabases map[DatabaseId]Database
func init() {
openClusters = make(map[string]Cluster)
openDatabases = make(map[string]Database)
openDatabases = make(map[DatabaseId]Database)
}
func startNetwork() error {
@ -287,13 +292,13 @@ func Open(clusterFile string, dbName []byte) (Database, error) {
openClusters[clusterFile] = cluster
}
db, ok := openDatabases[string(dbName)]
db, ok := openDatabases[DatabaseId{clusterFile, string(dbName)}]
if !ok {
db, e = cluster.OpenDatabase(dbName)
if e != nil {
return Database{}, e
}
openDatabases[string(dbName)] = db
openDatabases[DatabaseId{clusterFile, string(dbName)}] = db
}
return db, nil


@ -42,6 +42,20 @@ func int64ToBytes(i int64) ([]byte, error) {
return buf.Bytes(), nil
}
// Deprecated
//
// Parameter: IP:PORT
func (o NetworkOptions) SetLocalAddress(param string) error {
return o.setOpt(10, []byte(param))
}
// Deprecated
//
// Parameter: path to cluster file
func (o NetworkOptions) SetClusterFile(param string) error {
return o.setOpt(20, []byte(param))
}
// Enables trace output to a file in a directory of the client's choosing
//
// Parameter: path to output directory (or NULL for current working directory)
@ -85,7 +99,7 @@ func (o NetworkOptions) SetKnob(param string) error {
return o.setOpt(40, []byte(param))
}
// Set the TLS plugin to load. This option, if used, must be set before any other TLS options
// Deprecated
//
// Parameter: file path or linker-resolved name
func (o NetworkOptions) SetTLSPlugin(param string) error {
@ -280,6 +294,11 @@ func (o TransactionOptions) SetReadYourWritesDisable() error {
return o.setOpt(51, nil)
}
// Deprecated
func (o TransactionOptions) SetReadAheadDisable() error {
return o.setOpt(52, nil)
}
// Not yet implemented.
func (o TransactionOptions) SetDurabilityDatacenter() error {
return o.setOpt(110, nil)
@ -290,6 +309,11 @@ func (o TransactionOptions) SetDurabilityRisky() error {
return o.setOpt(120, nil)
}
// Deprecated
func (o TransactionOptions) SetDurabilityDevNullIsWebScale() error {
return o.setOpt(130, nil)
}
// Specifies that this transaction should be treated as highest priority and that lower priority transactions should block behind this one. Use is discouraged outside of low-level tools
func (o TransactionOptions) SetPrioritySystemImmediate() error {
return o.setOpt(200, nil)
@ -431,57 +455,72 @@ const (
StreamingModeSerial StreamingMode = 5
)
// Add performs an addition of little-endian integers. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The integers to be added must be stored in a little-endian representation. They can be signed in two's complement representation or unsigned. You can add to an integer at a known offset in the value by prepending the appropriate number of zero bytes to ``param`` and padding with zero bytes to match the length of the value. However, this offset technique requires that you know the addition will not cause the integer field within the value to overflow.
// Performs an addition of little-endian integers. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The integers to be added must be stored in a little-endian representation. They can be signed in two's complement representation or unsigned. You can add to an integer at a known offset in the value by prepending the appropriate number of zero bytes to ``param`` and padding with zero bytes to match the length of the value. However, this offset technique requires that you know the addition will not cause the integer field within the value to overflow.
func (t Transaction) Add(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 2)
}
// BitAnd performs a bitwise ``and`` operation. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
// Deprecated
func (t Transaction) And(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 6)
}
// Performs a bitwise ``and`` operation. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
func (t Transaction) BitAnd(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 6)
}
// BitOr performs a bitwise ``or`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
// Deprecated
func (t Transaction) Or(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 7)
}
// Performs a bitwise ``or`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
func (t Transaction) BitOr(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 7)
}
// BitXor performs a bitwise ``xor`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
// Deprecated
func (t Transaction) Xor(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 8)
}
// Performs a bitwise ``xor`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
func (t Transaction) BitXor(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 8)
}
// AppendIfFits appends ``param`` to the end of the existing value already in the database at the given key (or creates the key and sets the value to ``param`` if the key is empty). This will only append the value if the final concatenated value size is less than or equal to the maximum value size (i.e., if it fits). WARNING: No error is surfaced back to the user if the final value is too large because the mutation will not be applied until after the transaction has been committed. Therefore, it is only safe to use this mutation type if one can guarantee that one will keep the total value size under the maximum size.
// Appends ``param`` to the end of the existing value already in the database at the given key (or creates the key and sets the value to ``param`` if the key is empty). This will only append the value if the final concatenated value size is less than or equal to the maximum value size (i.e., if it fits). WARNING: No error is surfaced back to the user if the final value is too large because the mutation will not be applied until after the transaction has been committed. Therefore, it is only safe to use this mutation type if one can guarantee that one will keep the total value size under the maximum size.
func (t Transaction) AppendIfFits(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 9)
}
// Max performs a little-endian comparison of byte strings. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The larger of the two values is then stored in the database.
// Performs a little-endian comparison of byte strings. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The larger of the two values is then stored in the database.
func (t Transaction) Max(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 12)
}
// Min performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database.
// Performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database.
func (t Transaction) Min(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 13)
}
// SetVersionstampedKey transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes.
// Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes.
func (t Transaction) SetVersionstampedKey(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 14)
}
// SetVersionstampedValue transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset.
// Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset.
func (t Transaction) SetVersionstampedValue(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 15)
}
// ByteMin performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the smaller of the two values is then stored in the database.
// Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the smaller of the two values is then stored in the database.
func (t Transaction) ByteMin(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 16)
}
// ByteMax performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the larger of the two values is then stored in the database.
// Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the larger of the two values is then stored in the database.
func (t Transaction) ByteMax(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 17)
}


@ -45,7 +45,7 @@ class FDBDatabase extends NativeObjectWrapper implements Database, OptionConsume
@Override
public <T> T run(Function<? super Transaction, T> retryable, Executor e) {
Transaction t = this.createTransaction();
Transaction t = this.createTransaction(e);
try {
while (true) {
try {
@ -63,7 +63,7 @@ class FDBDatabase extends NativeObjectWrapper implements Database, OptionConsume
@Override
public <T> T read(Function<? super ReadTransaction, T> retryable, Executor e) {
return this.run(retryable);
return this.run(retryable, e);
}
@Override

View File

@ -107,7 +107,7 @@ public class Versionstamp implements Comparable<Versionstamp> {
* @return the unpacked user version included in the array
*/
public static int unpackUserVersion(byte[] bytes, int pos) {
return ((int)bytes[pos] & 0xff << 8) | ((int)bytes[pos + 1] & 0xff);
return (((int)bytes[pos] & 0xff) << 8) | ((int)bytes[pos + 1] & 0xff);
}
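The added parentheses matter because << binds tighter than & in Java, so the old expression computed bytes[pos] & (0xff << 8), which is always zero for a single byte, silently dropping the high byte of the user version (see the 6.0.11 release note below). A worked sketch in C++, which has the same operator precedence (hypothetical test, not code from this commit):

#include <cstdint>
#include <cassert>

static int unpackUserVersion(const uint8_t* bytes, int pos) {
	// Old (buggy): ((int)bytes[pos] & 0xff << 8) parses as bytes[pos] & 0xff00,
	// always 0 for a byte, so only the low byte survived.
	// Fixed: mask first, then shift.
	return (((int)bytes[pos] & 0xff) << 8) | ((int)bytes[pos + 1] & 0xff);
}

int main() {
	uint8_t v[] = { 0x01, 0x02 };              // encoded user version 0x0102
	assert(unpackUserVersion(v, 0) == 0x0102); // the old code returned 0x02
	return 0;
}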
/**


@ -18,7 +18,7 @@ setup(name="foundationdb",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache v2 License',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',

build/concatinate_jsons.py Executable file

@ -0,0 +1,11 @@
#!/usr/bin/env python
import sys
import json
lst = []
for filename in sys.argv[1:]:
    commands = json.load(open(filename))
    lst.extend(commands)
json.dump(lst, open("compile_commands.json", "w"))

build/project_commands.py Executable file

@ -0,0 +1,39 @@
#!/usr/bin/env python
import argparse
import json
import os
import os.path
import sys
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cflags', help="$(CFLAGS)")
    parser.add_argument('--cxxflags', help="$(CXXFLAGS)")
    parser.add_argument('--sources', help="All the source files")
    parser.add_argument('--out', help="Output file name")
    return parser.parse_args()

def main():
    args = parse_args()
    cwd = os.getcwd()
    args.cflags = args.cflags.replace('-DNO_INTELLISENSE', '').replace("/opt/boost", cwd+"/../boost")
    commands = []
    for fname in args.sources.split(' '):
        d = {}
        d["directory"] = cwd
        compiler = ""
        if fname.endswith("cpp") or fname.endswith(".h"):
            # Join with an explicit space so the two flag lists don't run together.
            compiler = "clang++ -x c++ " + args.cflags + " " + args.cxxflags
        if fname.endswith("c"):
            compiler = "clang -x c " + args.cflags
        d["command"] = compiler
        d["file"] = fname
        commands.append(d)
    json.dump(commands, open(args.out, "w"))

if __name__ == '__main__':
    main()


@ -58,6 +58,10 @@ GENNAME()_DEPS := $(addprefix $(DEPSDIR)/,$(GENNAME()_BUILD_SOURCES:=.d))
GENNAME: GENTARGET
$(CMDDIR)/GENDIR/compile_commands.json: build/project_commands.py ${GENNAME()_ALL_SOURCES}
@mkdir -p $(basename $@)
@build/project_commands.py --cflags="$(CFLAGS) $(GENNAME()_CFLAGS)" --cxxflags="$(CXXFLAGS) $(GENNAME()_CXXFLAGS)" --sources="$(GENNAME()_ALL_SOURCES)" --out="$@"
-include $(GENNAME()_DEPS)
$(OBJDIR)/GENDIR/%.actor.g.cpp: GENDIR/%.actor.cpp $(ACTORCOMPILER)


@ -421,11 +421,35 @@
"total_disk_used_bytes":0,
"total_kv_size_bytes":0,
"partitions_count":2,
"moving_data":{
"moving_data":{
"total_written_bytes":0,
"in_flight_bytes":0,
"in_queue_bytes":0
"in_queue_bytes":0,
"highest_priority":0
},
"team_trackers":[
{
"primary":true,
"in_flight_bytes":0,
"unhealthy_servers":0,
"state":{
"healthy":true,
"min_replicas_remaining":0,
"name":{
"$enum":[
"initializing",
"missing_data",
"healing",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",
"healthy"
]
},
"description":""
}
}
],
"least_operating_space_bytes_storage_server":0,
"max_machine_failures_without_losing_data":0
},


@ -533,4 +533,4 @@ The recommended minimum number of ``class=transaction`` (log server) processes i
.. warning:: The conflict-resolution algorithm used by FoundationDB is conservative: it guarantees that no conflicting transactions will be committed, but it may fail to commit some transactions that theoretically could have been. The effects of this conservatism may increase as you increase the number of resolvers. It is therefore important to employ the recommended techniques for :ref:`minimizing conflicts <developer-guide-transaction-conflicts>` when increasing the number of resolvers.
You can contact us on the `community forums <https://forums.foundationdb.org>`_ if you are interested in more details or if you are benchmarking or performance-tuning on large clusters. Also see our `performance benchmarks </performance>`_ for a baseline of how a well-configured cluster should perform.
You can contact us on the `community forums <https://forums.foundationdb.org>`_ if you are interested in more details or if you are benchmarking or performance-tuning on large clusters. Also see our :doc:`performance benchmarks <performance>` for a baseline of how a well-configured cluster should perform.


@ -10,38 +10,38 @@ macOS
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
* `FoundationDB-6.0.3.pkg <https://www.foundationdb.org/downloads/6.0.3/macOS/installers/FoundationDB-6.0.3.pkg>`_
* `FoundationDB-6.0.11.pkg <https://www.foundationdb.org/downloads/6.0.11/macOS/installers/FoundationDB-6.0.11.pkg>`_
Ubuntu
------
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
* `foundationdb-clients-6.0.3-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.3/ubuntu/installers/foundationdb-clients_6.0.3-1_amd64.deb>`_
* `foundationdb-server-6.0.3-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.3/ubuntu/installers/foundationdb-server_6.0.3-1_amd64.deb>`_ (depends on the clients package)
* `foundationdb-clients-6.0.11-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.11/ubuntu/installers/foundationdb-clients_6.0.11-1_amd64.deb>`_
* `foundationdb-server-6.0.11-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.11/ubuntu/installers/foundationdb-server_6.0.11-1_amd64.deb>`_ (depends on the clients package)
RHEL/CentOS EL6
---------------
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
* `foundationdb-clients-6.0.3-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.3/rhel6/installers/foundationdb-clients-6.0.3-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.0.3-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.3/rhel6/installers/foundationdb-server-6.0.3-1.el6.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.0.11-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel6/installers/foundationdb-clients-6.0.11-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.0.11-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel6/installers/foundationdb-server-6.0.11-1.el6.x86_64.rpm>`_ (depends on the clients package)
RHEL/CentOS EL7
---------------
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
* `foundationdb-clients-6.0.3-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.3/rhel7/installers/foundationdb-clients-6.0.3-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.0.3-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.3/rhel7/installers/foundationdb-server-6.0.3-1.el7.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.0.11-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel7/installers/foundationdb-clients-6.0.11-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.0.11-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel7/installers/foundationdb-server-6.0.11-1.el7.x86_64.rpm>`_ (depends on the clients package)
Windows
-------
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
* `foundationdb-6.0.3-x64.msi <https://www.foundationdb.org/downloads/6.0.3/windows/installers/foundationdb-6.0.3-x64.msi>`_
* `foundationdb-6.0.11-x64.msi <https://www.foundationdb.org/downloads/6.0.11/windows/installers/foundationdb-6.0.11-x64.msi>`_
API Language Bindings
=====================
@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
* `foundationdb-6.0.3.tar.gz <https://www.foundationdb.org/downloads/6.0.3/bindings/python/foundationdb-6.0.3.tar.gz>`_
* `foundationdb-6.0.11.tar.gz <https://www.foundationdb.org/downloads/6.0.11/bindings/python/foundationdb-6.0.11.tar.gz>`_
Ruby 1.9.3/2.0.0+
-----------------
* `fdb-6.0.3.gem <https://www.foundationdb.org/downloads/6.0.3/bindings/ruby/fdb-6.0.3.gem>`_
* `fdb-6.0.11.gem <https://www.foundationdb.org/downloads/6.0.11/bindings/ruby/fdb-6.0.11.gem>`_
Java 8+
-------
* `fdb-java-6.0.3.jar <https://www.foundationdb.org/downloads/6.0.3/bindings/java/fdb-java-6.0.3.jar>`_
* `fdb-java-6.0.3-javadoc.jar <https://www.foundationdb.org/downloads/6.0.3/bindings/java/fdb-java-6.0.3-javadoc.jar>`_
* `fdb-java-6.0.11.jar <https://www.foundationdb.org/downloads/6.0.11/bindings/java/fdb-java-6.0.11.jar>`_
* `fdb-java-6.0.11-javadoc.jar <https://www.foundationdb.org/downloads/6.0.11/bindings/java/fdb-java-6.0.11-javadoc.jar>`_
Go 1.1+
-------


@ -2,6 +2,33 @@
Release Notes
#############
5.2.8
=====
Bindings
--------
* Java: ``FDBDatabase::run`` and ``FDBDatabase::read`` now use the ``Executor`` provided for executing asynchronous callbacks instead of the default one for the database. `(Issue #640) <https://github.com/apple/foundationdb/issues/640>`_
Fixes
-----
* A large number of concurrent read attempts could bring the database down after a cluster reboot. `(PR #650) <https://github.com/apple/foundationdb/pull/650>`_
5.2.7
=====
Bindings
--------
* The go bindings now cache database connections on a per-cluster basis. `(Issue #607) <https://github.com/apple/foundationdb/issues/607>`_
Fixes
-----
* A client could fail to connect to a cluster when the cluster was upgraded to a version compatible with the client. This affected upgrades that were using the multi-version client to maintain compatibility with both versions of the cluster. `(PR #637) <https://github.com/apple/foundationdb/pull/637>`_
* Incorrect accounting of incompatible connections led to occasional assertion failures. `(PR #637) <https://github.com/apple/foundationdb/pull/637>`_
5.2.6
=====


@ -2,7 +2,7 @@
Release Notes
#############
6.0.3
6.0.11
=====
Features
@ -13,6 +13,8 @@ Features
* The TLS plugin is now statically linked into the client and server binaries and no longer requires a separate library. `(Issue #436) <https://github.com/apple/foundationdb/issues/436>`_
* TLS peer verification now supports verifying on Subject Alternative Name. `(Issue #514) <https://github.com/apple/foundationdb/issues/514>`_
* TLS peer verification now supports suffix matching by field. `(Issue #515) <https://github.com/apple/foundationdb/issues/515>`_
* TLS certificates are automatically reloaded after being updated. [6.0.5] `(Issue #505) <https://github.com/apple/foundationdb/issues/505>`_
* Added the ``fileconfigure`` command to fdbcli, which configures a database from a JSON document. [6.0.10] `(PR #713) <https://github.com/apple/foundationdb/pull/713>`_
Performance
-----------
@ -23,23 +25,43 @@ Performance
* Clients optimistically assume the first leader reply from a coordinator is correct. `(PR #425) <https://github.com/apple/foundationdb/pull/425>`_
* Network connections are now closed after no interface needs the connection. [6.0.1] `(Issue #375) <https://github.com/apple/foundationdb/issues/375>`_
* Significantly improved the CPU efficiency of copying mutations to transaction logs during recovery. [6.0.2] `(PR #595) <https://github.com/apple/foundationdb/pull/595>`_
* A cluster configured with usable_regions=2 did not limit the rate at which it could copy data from the primary DC to the remote DC. This caused poor performance when recovering from a DC outage. [6.0.5] `(PR #673) <https://github.com/apple/foundationdb/pull/673>`_
* Significantly improved the CPU efficiency of generating status on the cluster controller. [6.0.11] `(PR #758) <https://github.com/apple/foundationdb/pull/758>`_
Fixes
-----
* Backed out the changes which closed unnecessary connections. [6.0.3] `(PR #633) <https://github.com/apple/foundationdb/pull/633>`_
* Not all endpoint failures were reported to the failure monitor.
* Watches registered on a lagging storage server would take a long time to trigger.
* The cluster controller would not start a new generation until it recovered its files from disk.
* Under heavy write load, storage servers would occasionally pause for ~100ms. [6.0.2] `(PR #597) <https://github.com/apple/foundationdb/pull/597>`_
* Storage servers were not given time to rejoin the cluster before being marked as failed. [6.0.2] `(PR #592) <https://github.com/apple/foundationdb/pull/592>`_
* Incorrect accounting of incompatible connections led to occasional assertion failures. [6.0.3] `(PR #616) <https://github.com/apple/foundationdb/pull/616>`_
* A client could fail to connect to a cluster when the cluster was upgraded to a version compatible with the client. This affected upgrades that were using the multi-version client to maintain compatibility with both versions of the cluster. [6.0.4] `(PR #637) <https://github.com/apple/foundationdb/pull/637>`_
* A large number of concurrent read attempts could bring the database down after a cluster reboot. [6.0.4] `(PR #650) <https://github.com/apple/foundationdb/pull/650>`_
* Automatic suppression of trace events which occur too frequently was happening before trace events were suppressed by other mechanisms. [6.0.4] `(PR #656) <https://github.com/apple/foundationdb/pull/656>`_
* After a recovery, the rate at which transaction logs made mutations durable to disk was around 5 times slower than normal. [6.0.5] `(PR #666) <https://github.com/apple/foundationdb/pull/666>`_
* Clusters configured to use TLS could get stuck spending all of their CPU opening new connections. [6.0.5] `(PR #666) <https://github.com/apple/foundationdb/pull/666>`_
* Configuring usable_regions=2 on a cluster with a large amount of data caused commits to pause for a few seconds. [6.0.5] `(PR #687) <https://github.com/apple/foundationdb/pull/687>`_
* On clusters configured with usable_regions=2, status reported no replicas remaining when the primary DC was still healthy. [6.0.5] `(PR #687) <https://github.com/apple/foundationdb/pull/687>`_
* Clients could crash when passing in TLS options. [6.0.5] `(PR #649) <https://github.com/apple/foundationdb/pull/649>`_
* A mismatched TLS certificate and key set could cause the server to crash. [6.0.5] `(PR #689) <https://github.com/apple/foundationdb/pull/689>`_
* Databases with more than 10TB of data would pause for a few seconds after recovery. [6.0.6] `(PR #705) <https://github.com/apple/foundationdb/pull/705>`_
* Sometimes a minority of coordinators would fail to converge after a new leader was elected. [6.0.6] `(PR #700) <https://github.com/apple/foundationdb/pull/700>`_
* Calling status too many times in a 5 second interval caused the cluster controller to pause for a few seconds. [6.0.7] `(PR #711) <https://github.com/apple/foundationdb/pull/711>`_
* TLS certificate reloading could cause TLS connections to drop until process restart. [6.0.9] `(PR #717) <https://github.com/apple/foundationdb/pull/717>`_
* Configuring from usable_regions=2 to usable_regions=1 on a cluster with a large number of processes would prevent data distribution from completing. [6.0.10] `(PR #721) <https://github.com/apple/foundationdb/pull/721>`_ `(PR #739) <https://github.com/apple/foundationdb/pull/739>`_
* Watches polled the server much more frequently than intended. [6.0.10] `(PR #728) <https://github.com/apple/foundationdb/pull/728>`_
* Backup and DR didn't allow setting certain knobs. [6.0.10] `(Issue #715) <https://github.com/apple/foundationdb/issues/715>`_
* The failure monitor will become much less reactive after multiple successive failed recoveries. [6.0.10] `(PR #739) <https://github.com/apple/foundationdb/pull/739>`_
* Data distribution did not limit the number of source servers for a shard. [6.0.10] `(PR #739) <https://github.com/apple/foundationdb/pull/739>`_
Status
------
* The replication factor in status JSON is stored under "redundancy_mode" instead of "redundancy":"factor". `(PR #492) <https://github.com/apple/foundationdb/pull/492>`_
* Additional metrics for storage server lag as well as the number of watches and mutation count have been added and are exposed through status. `(PR #521) <https://github.com/apple/foundationdb/pull/521>`_
* The replication factor in status JSON is stored under ``redundancy_mode`` instead of ``redundancy.factor``. `(PR #492) <https://github.com/apple/foundationdb/pull/492>`_
* The metric ``data_version_lag`` has been replaced by ``data_lag.versions`` and ``data_lag.seconds``. `(PR #521) <https://github.com/apple/foundationdb/pull/521>`_
* Additional metrics for the number of watches and mutation count have been added and are exposed through status. `(PR #521) <https://github.com/apple/foundationdb/pull/521>`_
Bindings
@ -48,12 +70,15 @@ Bindings
* API version updated to 600. There are no changes since API version 520.
* Several cases where functions in go might previously cause a panic now return a non-``nil`` error. `(PR #532) <https://github.com/apple/foundationdb/pull/532>`_
* C API calls made on the network thread could be reordered with calls made from other threads. [6.0.2] `(Issue #518) <https://github.com/apple/foundationdb/issues/518>`_
* The TLS_PLUGIN option is now a no-op and has been deprecated. [6.0.10] `(PR #710) <https://github.com/apple/foundationdb/pull/710>`_
* Java: the `Versionstamp::getUserVersion() </javadoc/com/apple/foundationdb/tuple/Versionstamp.html#getUserVersion-->`_ method did not handle user versions greater than ``0x00FF`` due to operator precedence errors. [6.0.11] `(Issue #761) <https://github.com/apple/foundationdb/issues/761>`_
Other Changes
-------------
* Does not support upgrades from any version older than 5.0.
* Normalized the capitalization of trace event names and attributes. `(PR #455) <https://github.com/apple/foundationdb/pull/455>`_
* Increased the memory requirements of the transaction log by 400MB. [6.0.5] `(PR #673) <https://github.com/apple/foundationdb/pull/673>`_
Earlier release notes
---------------------

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include "flow/flow.h"
#include "flow/FastAlloc.h"
#include "flow/serialize.h"
#include "flow/IRandom.h"
@ -68,6 +68,7 @@ using std::endl;
#endif
#include "flow/SimpleOpt.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Type of program being executed
@ -1086,7 +1087,7 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
tagLastRestorableVersions.push_back(fba.getLastRestorable(tr, StringRef(tag->tagName)));
}
Void _ = wait( waitForAll(tagLastRestorableVersions) && waitForAll(tagStates) && waitForAll(tagContainers) && waitForAll(tagRangeBytes) && waitForAll(tagLogBytes) && success(fBackupPaused));
wait( waitForAll(tagLastRestorableVersions) && waitForAll(tagStates) && waitForAll(tagContainers) && waitForAll(tagRangeBytes) && waitForAll(tagLogBytes) && success(fBackupPaused));
JSONDoc tagsRoot = layerRoot.subDoc("tags.$latest");
layerRoot.create("tags.timestamp") = now();
@ -1137,7 +1138,7 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
tagLogBytesDR.push_back(dba.getLogBytesWritten(tr2, tagUID));
}
Void _ = wait(waitForAll(backupStatus) && waitForAll(backupVersion) && waitForAll(tagRangeBytesDR) && waitForAll(tagLogBytesDR) && success(fDRPaused));
wait(waitForAll(backupStatus) && waitForAll(backupVersion) && waitForAll(tagRangeBytesDR) && waitForAll(tagLogBytesDR) && success(fDRPaused));
JSONDoc tagsRoot = layerRoot.subDoc("tags.$latest");
layerRoot.create("tags.timestamp") = now();
@ -1233,7 +1234,7 @@ ACTOR Future<json_spirit::mObject> getLayerStatus(Database src, std::string root
return statusDoc;
}
catch (Error& e) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
}
@ -1253,7 +1254,7 @@ ACTOR Future<Void> updateAgentPollRate(Database src, std::string rootKey, std::s
} catch(Error &e) {
TraceEvent(SevWarn, "BackupAgentPollRateUpdateError").error(e);
}
Void _ = wait(delay(CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE_UPDATE_INTERVAL));
wait(delay(CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE_UPDATE_INTERVAL));
}
}
@ -1271,11 +1272,11 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest, std::string name
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
tr->set(metaKey, rootKey);
Void _ = wait(tr->commit());
wait(tr->commit());
break;
}
catch (Error& e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -1288,18 +1289,18 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest, std::string name
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Future<std::string> futureStatusDoc = getLayerStatus(tr, name, id, exe, taskDest);
Void _ = wait(cleanupStatus(tr, rootKey, name, id));
wait(cleanupStatus(tr, rootKey, name, id));
std::string statusdoc = wait(futureStatusDoc);
tr->set(instanceKey, statusdoc);
Void _ = wait(tr->commit());
wait(tr->commit());
break;
}
catch (Error& e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
Void _ = wait(delay(CLIENT_KNOBS->BACKUP_STATUS_DELAY * ( ( 1.0 - CLIENT_KNOBS->BACKUP_STATUS_JITTER ) + 2 * g_random->random01() * CLIENT_KNOBS->BACKUP_STATUS_JITTER )));
wait(delay(CLIENT_KNOBS->BACKUP_STATUS_DELAY * ( ( 1.0 - CLIENT_KNOBS->BACKUP_STATUS_JITTER ) + 2 * g_random->random01() * CLIENT_KNOBS->BACKUP_STATUS_JITTER )));
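// A note on the jitter math above (illustrative, not from the patch): with
// D = BACKUP_STATUS_DELAY and J = BACKUP_STATUS_JITTER, the factor
// (1.0 - J) + 2 * random01() * J is uniform in [1 - J, 1 + J), so the delay is
// uniform in [D*(1 - J), D*(1 + J)) and concurrent agents avoid writing status
// in lockstep.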
// Now that status was written at least once by this process (and hopefully others), start the poll rate control updater if it wasn't started yet
if(!pollRateUpdater.isValid() && pollDelay != nullptr)
@ -1307,7 +1308,7 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest, std::string name
}
catch (Error& e) {
TraceEvent(SevWarnAlways, "UnableToWriteStatus").error(e);
Void _ = wait(delay(10.0));
wait(delay(10.0));
}
}
}
@ -1321,7 +1322,7 @@ ACTOR Future<Void> runDBAgent(Database src, Database dest) {
loop {
try {
state Void run = wait(backupAgent.run(dest, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
wait(backupAgent.run(dest, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
break;
}
catch (Error& e) {
@ -1331,7 +1332,7 @@ ACTOR Future<Void> runDBAgent(Database src, Database dest) {
TraceEvent(SevError, "DA_runAgent").error(e);
fprintf(stderr, "ERROR: DR agent encountered fatal error `%s'\n", e.what());
Void _ = wait( delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY) );
wait( delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY) );
}
}
@ -1346,7 +1347,7 @@ ACTOR Future<Void> runAgent(Database db) {
loop {
try {
state Void run = wait(backupAgent.run(db, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
wait(backupAgent.run(db, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
break;
}
catch (Error& e) {
@ -1356,7 +1357,7 @@ ACTOR Future<Void> runAgent(Database db) {
TraceEvent(SevError, "BA_runAgent").error(e);
fprintf(stderr, "ERROR: backup agent encountered fatal error `%s'\n", e.what());
Void _ = wait( delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY) );
wait( delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY) );
}
}
@ -1374,7 +1375,7 @@ ACTOR Future<Void> submitDBBackup(Database src, Database dest, Standalone<Vector
}
Void _ = wait(backupAgent.submitBackup(dest, KeyRef(tagName), backupRanges, false, StringRef(), StringRef(), true));
wait(backupAgent.submitBackup(dest, KeyRef(tagName), backupRanges, false, StringRef(), StringRef(), true));
// Check if a backup agent is running
bool agentRunning = wait(backupAgent.checkActive(dest));
@ -1457,7 +1458,7 @@ ACTOR Future<Void> submitBackup(Database db, std::string url, int snapshotInterv
}
else {
Void _ = wait(backupAgent.submitBackup(db, KeyRef(url), snapshotIntervalSeconds, tagName, backupRanges, stopWhenDone));
wait(backupAgent.submitBackup(db, KeyRef(url), snapshotIntervalSeconds, tagName, backupRanges, stopWhenDone));
// Wait for the backup to complete, if requested
if (waitForCompletion) {
@ -1513,7 +1514,7 @@ ACTOR Future<Void> switchDBBackup(Database src, Database dest, Standalone<Vector
}
Void _ = wait(backupAgent.atomicSwitchover(dest, KeyRef(tagName), backupRanges, StringRef(), StringRef()));
wait(backupAgent.atomicSwitchover(dest, KeyRef(tagName), backupRanges, StringRef(), StringRef()));
printf("The DR on tag `%s' was successfully switched.\n", printable(StringRef(tagName)).c_str());
}
@ -1580,8 +1581,8 @@ ACTOR Future<Void> abortDBBackup(Database src, Database dest, std::string tagNam
{
state DatabaseBackupAgent backupAgent(src);
Void _ = wait(backupAgent.abortBackup(dest, Key(tagName), partial));
Void _ = wait(backupAgent.unlockBackup(dest, Key(tagName)));
wait(backupAgent.abortBackup(dest, Key(tagName), partial));
wait(backupAgent.unlockBackup(dest, Key(tagName)));
printf("The DR on tag `%s' was successfully aborted.\n", printable(StringRef(tagName)).c_str());
}
@ -1611,7 +1612,7 @@ ACTOR Future<Void> abortBackup(Database db, std::string tagName) {
{
state FileBackupAgent backupAgent;
Void _ = wait(backupAgent.abortBackup(db, tagName));
wait(backupAgent.abortBackup(db, tagName));
printf("The backup on tag `%s' was successfully aborted.\n", printable(StringRef(tagName)).c_str());
}
@ -1661,7 +1662,7 @@ ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, bool wait
{
state FileBackupAgent backupAgent;
Void _ = wait(backupAgent.discontinueBackup(db, StringRef(tagName)));
wait(backupAgent.discontinueBackup(db, StringRef(tagName)));
// Wait for the backup to complete, if requested
if (waitForCompletion) {
@ -1700,7 +1701,7 @@ ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, bool wait
ACTOR Future<Void> changeBackupResumed(Database db, bool pause) {
try {
state FileBackupAgent backupAgent;
Void _ = wait(backupAgent.taskBucket->changePause(db, pause));
wait(backupAgent.taskBucket->changePause(db, pause));
printf("All backup agents have been %s.\n", pause ? "paused" : "resumed");
}
catch (Error& e) {
@ -1716,7 +1717,7 @@ ACTOR Future<Void> changeBackupResumed(Database db, bool pause) {
ACTOR Future<Void> changeDBBackupResumed(Database src, Database dest, bool pause) {
try {
state DatabaseBackupAgent backupAgent(src);
Void _ = wait(backupAgent.taskBucket->changePause(dest, pause));
wait(backupAgent.taskBucket->changePause(dest, pause));
printf("All DR agents have been %s.\n", pause ? "paused" : "resumed");
}
catch (Error& e) {
@ -1760,7 +1761,7 @@ ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string cont
state BackupDescription description = wait(bc->describeBackup());
if(dbVersion <= 0) {
Void _ = wait(description.resolveVersionTimes(db));
wait(description.resolveVersionTimes(db));
if(description.maxRestorableVersion.present())
restoreVersion = description.maxRestorableVersion.get();
else {
@ -1842,7 +1843,7 @@ ACTOR Future<Void> expireBackupData(const char *name, std::string destinationCon
try {
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
Void _ = wait(c->expireData(endVersion, force, restorableAfterVersion));
wait(c->expireData(endVersion, force, restorableAfterVersion));
printf("All data before version %lld is deleted.\n", endVersion);
}
catch (Error& e) {
@ -1866,11 +1867,11 @@ ACTOR Future<Void> deleteBackupContainer(const char *name, std::string destinati
loop {
choose {
when ( Void _ = wait(done) ) {
when ( wait(done) ) {
printf("The entire container has been deleted.\n");
break;
}
when ( Void _ = wait(delay(3)) ) {
when ( wait(delay(3)) ) {
printf("%d files have been deleted so far...\n", numDeleted);
}
}
@ -1891,7 +1892,7 @@ ACTOR Future<Void> describeBackup(const char *name, std::string destinationConta
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
state BackupDescription desc = wait(c->describeBackup(deep));
if(cx.present())
Void _ = wait(desc.resolveVersionTimes(cx.get()));
wait(desc.resolveVersionTimes(cx.get()));
printf("%s\n", desc.toString().c_str());
}
catch (Error& e) {
@ -2620,13 +2621,19 @@ int main(int argc, char* argv[]) {
commandLine += argv[a];
}
delete FLOW_KNOBS;
FlowKnobs* flowKnobs = new FlowKnobs(true);
FLOW_KNOBS = flowKnobs;
delete CLIENT_KNOBS;
ClientKnobs* clientKnobs = new ClientKnobs(true);
CLIENT_KNOBS = clientKnobs;
for(auto k=knobs.begin(); k!=knobs.end(); ++k) {
try {
if (!clientKnobs->setKnob( k->first, k->second )) {
if (!flowKnobs->setKnob( k->first, k->second ) &&
!clientKnobs->setKnob( k->first, k->second ))
{
fprintf(stderr, "Unrecognized knob option '%s'\n", k->first.c_str());
return FDB_EXIT_ERROR;
}
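// Illustrative example (hypothetical flag spelling): a command-line option such as
// --knob_min_trace_severity=5 reaches this loop as the pair ("min_trace_severity", "5");
// it is offered to FlowKnobs first, then to ClientKnobs, and a name neither recognizes
// aborts with FDB_EXIT_ERROR.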
@ -2671,7 +2678,6 @@ int main(int argc, char* argv[]) {
Reference<Cluster> source_cluster;
Reference<ClusterConnectionFile> source_ccf;
Database source_db;
const KeyRef databaseKey = LiteralStringRef("DB");
FileBackupAgent ba;
Key tag;
Future<Optional<Void>> f;
@ -2745,7 +2751,7 @@ int main(int argc, char* argv[]) {
.detail("MemoryLimit", memLimit)
.trackLatest("ProgramStart");
db = cluster->createDatabase(databaseKey, localities).get();
db = cluster->createDatabase(localities).get();
return true;
};
@ -2768,7 +2774,7 @@ int main(int argc, char* argv[]) {
return FDB_EXIT_ERROR;
}
source_db = source_cluster->createDatabase(databaseKey, localities).get();
source_db = source_cluster->createDatabase(localities).get();
}
switch (programExe)

View File

@ -18,7 +18,6 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include "FlowLineNoise.h"
#include "flow/IThreadPool.h"
@ -35,6 +34,7 @@
#else
#define HAVE_LINENOISE 0
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
struct LineNoiseReader : IThreadPoolReceiver {
virtual void init() {}
@ -150,7 +150,7 @@ ACTOR Future<Void> waitKeyboardInterrupt(boost::asio::io_service* ios) {
}
});
Void _ = wait(result.getFuture());
wait(result.getFuture());
return Void();
}
@ -178,4 +178,4 @@ void LineNoise::historySave( std::string const& filename ) {
throw io_error();
}
#endif
}
}

View File

@ -27,6 +27,7 @@
#include "fdbclient/ReadYourWrites.h"
#include "fdbclient/ClusterInterface.h"
#include "fdbclient/ManagementAPI.h"
#include "fdbclient/Schemas.h"
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/FDBOptions.g.h"
@ -50,6 +51,8 @@
#include "versions.h"
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
extern const char* getHGVersion();
std::vector<std::string> validOptions;
@ -443,8 +446,12 @@ void initHelp() {
"All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK);
helpMap["configure"] = CommandHelp(
"configure [new] <single|double|triple|three_data_hall|three_datacenter|ssd|memory|proxies=<PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
"change database configuration",
"change the database configuration",
"The `new' option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When used, both a redundancy mode and a storage engine must be specified.\n\nRedundancy mode:\n single - one copy of the data. Not fault tolerant.\n double - two copies of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - See the Admin Guide.\n three_datacenter - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. Must be at least 1, or set to -1 which restores the number of proxies to the default value.\n\nlogs=<LOGS>: Sets the desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information.");
helpMap["fileconfigure"] = CommandHelp(
"fileconfigure [new] <FILENAME>",
"change the database configuration from a file",
"The `new' option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. Load a JSON document from the provided file, and change the database configuration to match the contents of the JSON document. The format should be the same as the value of the \"configuration\" entry in status JSON without \"excluded_servers\" or \"coordinators_count\".");
helpMap["coordinators"] = CommandHelp(
"coordinators auto|<ADDRESS>+ [description=new_cluster_description]",
"change cluster coordinators or description",
@ -1450,14 +1457,14 @@ int printStatusFromJSON( std::string const& jsonFileName ) {
}
ACTOR Future<Void> timeWarning( double when, const char* msg ) {
Void _ = wait( delay(when) );
wait( delay(when) );
fputs( msg, stderr );
return Void();
}
ACTOR Future<Void> checkStatus(Future<Void> f, Reference<ClusterConnectionFile> clusterFile, bool displayDatabaseAvailable = true) {
Void _ = wait(f);
wait(f);
StatusObject s = wait(StatusClient::statusFetcher(clusterFile));
printf("\n");
printStatus(s, StatusClient::MINIMAL, displayDatabaseAvailable);
@ -1469,23 +1476,23 @@ ACTOR template <class T> Future<T> makeInterruptable( Future<T> f ) {
Future<Void> interrupt = LineNoise::onKeyboardInterrupt();
choose {
when (T t = wait(f)) { return t; }
when (Void _ = wait(interrupt)) {
when (wait(interrupt)) {
f.cancel();
throw operation_cancelled();
}
}
}
ACTOR Future<Database> openDatabase( Reference<ClusterConnectionFile> ccf, Reference<Cluster> cluster, Standalone<StringRef> name, bool doCheckStatus ) {
state Database db = wait( cluster->createDatabase(name) );
ACTOR Future<Database> openDatabase( Reference<ClusterConnectionFile> ccf, Reference<Cluster> cluster, bool doCheckStatus ) {
state Database db = wait( cluster->createDatabase() );
if (doCheckStatus) {
Void _ = wait( makeInterruptable( checkStatus( Void(), ccf )) );
wait( makeInterruptable( checkStatus( Void(), ccf )) );
}
return db;
}
ACTOR Future<Void> commitTransaction( Reference<ReadYourWritesTransaction> tr ) {
Void _ = wait( makeInterruptable( tr->commit() ) );
wait( makeInterruptable( tr->commit() ) );
auto ver = tr->getCommittedVersion();
if (ver != invalidVersion)
printf("Committed (%" PRId64 ")\n", ver);
@ -1573,9 +1580,96 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
case ConfigurationResult::UNKNOWN_OPTION:
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
printUsage(tokens[0]);
ret = true;
ret=true;
break;
case ConfigurationResult::INVALID_CONFIGURATION:
printf("ERROR: These changes would make the configuration invalid\n");
ret=true;
break;
case ConfigurationResult::DATABASE_ALREADY_CREATED:
printf("ERROR: Database already exists! To change configuration, don't say `new'\n");
ret=true;
break;
case ConfigurationResult::DATABASE_CREATED:
printf("Database created\n");
ret=false;
break;
case ConfigurationResult::SUCCESS:
printf("Configuration changed\n");
ret=false;
break;
default:
ASSERT(false);
ret=true;
};
return ret;
}
ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDatabase) {
std::string contents(readFileBytes(filePath, 100000));
json_spirit::mValue config;
if(!json_spirit::read_string( contents, config )) {
printf("ERROR: Invalid JSON\n");
return true;
}
StatusObject configJSON = config.get_obj();
json_spirit::mValue schema;
if(!json_spirit::read_string( JSONSchemas::configurationSchema.toString(), schema )) {
ASSERT(false);
}
std::string errorStr;
if( !schemaMatch(schema.get_obj(), configJSON, errorStr) ) {
printf("%s", errorStr.c_str());
return true;
}
std::string configString;
if(isNewDatabase) {
configString = "new";
}
for(auto kv : configJSON) {
if(!configString.empty()) {
configString += " ";
}
if( kv.second.type() == json_spirit::int_type ) {
configString += kv.first + ":=" + format("%d", kv.second.get_int());
} else if( kv.second.type() == json_spirit::str_type ) {
configString += kv.second.get_str();
} else if( kv.second.type() == json_spirit::array_type ) {
configString += kv.first + "=" + json_spirit::write_string(json_spirit::mValue(kv.second.get_array()), json_spirit::Output_options::none);
} else {
printUsage(LiteralStringRef("fileconfigure"));
return true;
}
}
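// For example (illustrative input): a file containing
//   { "redundancy_mode": "double", "storage_engine": "ssd", "logs": 5 }
// passes the schema check and yields a configuration string containing `double',
// `ssd', and `logs:=5' (with "new" prepended when isNewDatabase is set), since
// string values contribute their value alone and integers become key:=value.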
ConfigurationResult::Type result = wait( makeInterruptable( changeConfig(db, configString) ) );
// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
// there are various results specific to changeConfig() that we need to report:
bool ret;
switch(result) {
case ConfigurationResult::NO_OPTIONS_PROVIDED:
printf("ERROR: No options provided\n");
ret=true;
break;
case ConfigurationResult::CONFLICTING_OPTIONS:
printf("ERROR: Conflicting options\n");
ret=true;
break;
case ConfigurationResult::UNKNOWN_OPTION:
printf("ERROR: Unknown option\n"); // This should not be possible because of the schema match
ret=true;
break;
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
printf("ERROR: Must specify both a replication level and a storage engine when creating a new database\n");
ret=true;
break;
case ConfigurationResult::INVALID_CONFIGURATION:
printf("ERROR: These changes would make the configuration invalid\n");
ret=true;
break;
case ConfigurationResult::DATABASE_ALREADY_CREATED:
printf("ERROR: Database already exists! To change configuration, don't say `new'\n");
ret=true;
@ -1701,7 +1795,7 @@ ACTOR Future<bool> include( Database db, std::vector<StringRef> tokens ) {
}
}
Void _ = wait( makeInterruptable(includeServers(db, addresses)) );
wait( makeInterruptable(includeServers(db, addresses)) );
return false;
};
@ -1827,14 +1921,14 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
}
}
Void _ = wait( makeInterruptable(excludeServers(db,addresses)) );
wait( makeInterruptable(excludeServers(db,addresses)) );
printf("Waiting for state to be removed from all excluded servers. This may take a while.\n");
printf("(Interrupting this wait with CTRL+C will not cancel the data movement.)\n");
if(warn.isValid())
warn.cancel();
Void _ = wait( makeInterruptable(waitForExcludedServers(db,addresses)) );
wait( makeInterruptable(waitForExcludedServers(db,addresses)) );
std::vector<ProcessData> workers = wait( makeInterruptable(getWorkers(db)) );
std::map<uint32_t, std::set<uint16_t>> workerPorts;
@ -1914,7 +2008,7 @@ ACTOR Future<bool> setClass( Database db, std::vector<StringRef> tokens ) {
return true;
}
Void _ = wait( makeInterruptable(setClass(db,addr,processClass)) );
wait( makeInterruptable(setClass(db,addr,processClass)) );
return false;
};
@ -2218,9 +2312,6 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
state FdbOptions *options = &globalOptions;
state const char *database = "DB";
state Standalone<StringRef> openDbName = StringRef(database);
state Reference<ClusterConnectionFile> ccf;
state std::pair<std::string, bool> resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName( opt.clusterFile );
@ -2258,9 +2349,9 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
.trackLatest("ProgramStart");
}
if (connected && database) {
if (connected) {
try {
Database _db = wait( openDatabase( ccf, cluster, openDbName, !opt.exec.present() && opt.initialStatusCheck ) );
Database _db = wait( openDatabase( ccf, cluster, !opt.exec.present() && opt.initialStatusCheck ) );
db = _db;
tr = Reference<ReadYourWritesTransaction>();
opened = true;
@ -2269,7 +2360,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
} catch (Error& e) {
if(e.code() != error_code_actor_cancelled) {
printf("ERROR: %s (%d)\n", e.what(), e.code());
printf("Unable to open database `%s'\n", database);
printf("Unable to open database\n");
}
return 1;
}
@ -2415,7 +2506,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}
if (tokencmp(tokens[0], "waitconnected")) {
Void _ = wait( makeInterruptable( cluster->onConnected() ) );
wait( makeInterruptable( cluster->onConnected() ) );
continue;
}
@ -2457,6 +2548,17 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue;
}
if (tokencmp(tokens[0], "fileconfigure")) {
if (tokens.size() == 2 || (tokens.size() == 3 && tokens[1] == LiteralStringRef("new"))) {
bool err = wait( fileConfigure( db, tokens.back().toString(), tokens.size() == 3 ) );
if (err) is_error = true;
} else {
printUsage(tokens[0]);
is_error = true;
}
continue;
}
if (tokencmp(tokens[0], "coordinators")) {
auto cs = ClusterConnectionFile( ccf->getFilename() ).getConnectionString();
if (tokens.size() < 2) {
@ -2529,7 +2631,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
printf("ERROR: No active transaction\n");
is_error = true;
} else {
Void _ = wait( commitTransaction( tr ) );
wait( commitTransaction( tr ) );
intrans = false;
options = &globalOptions;
}
@ -2639,7 +2741,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
printUsage(tokens[0]);
is_error = true;
}
Void _ = wait( makeInterruptable( forceRecovery( ccf ) ) );
wait( makeInterruptable( forceRecovery( ccf ) ) );
continue;
}
@ -2665,7 +2767,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}
state Future<Optional<Standalone<StringRef>>> sampleRateFuture = tr->get(fdbClientInfoTxnSampleRate);
state Future<Optional<Standalone<StringRef>>> sizeLimitFuture = tr->get(fdbClientInfoTxnSizeLimit);
Void _ = wait(makeInterruptable(success(sampleRateFuture) && success(sizeLimitFuture)));
wait(makeInterruptable(success(sampleRateFuture) && success(sizeLimitFuture)));
std::string sampleRateStr = "default", sizeLimitStr = "default";
if (sampleRateFuture.get().present()) {
const double sampleRateDbl = BinaryReader::fromStringRef<double>(sampleRateFuture.get().get(), Unversioned());
@ -2716,7 +2818,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
tr->set(fdbClientInfoTxnSampleRate, BinaryWriter::toValue(sampleRate, Unversioned()));
tr->set(fdbClientInfoTxnSizeLimit, BinaryWriter::toValue(sizeLimit, Unversioned()));
if (!intrans) {
Void _ = wait( commitTransaction( tr ) );
wait( commitTransaction( tr ) );
}
continue;
}
@ -2803,7 +2905,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}
}
if (!is_error) {
Void _ = wait(waitForAll(all_profiler_responses));
wait(waitForAll(all_profiler_responses));
for (int i = 0; i < all_profiler_responses.size(); i++) {
const ErrorOr<Void>& err = all_profiler_responses[i].get();
if (err.isError()) {
@ -2972,7 +3074,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
tr->set(tokens[1], tokens[2]);
if (!intrans) {
Void _ = wait( commitTransaction( tr ) );
wait( commitTransaction( tr ) );
}
}
continue;
@ -2993,7 +3095,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
tr->clear(tokens[1]);
if (!intrans) {
Void _ = wait( commitTransaction( tr ) );
wait( commitTransaction( tr ) );
}
}
continue;
@ -3014,7 +3116,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
tr->clear(KeyRangeRef(tokens[1], tokens[2]));
if (!intrans) {
Void _ = wait( commitTransaction( tr ) );
wait( commitTransaction( tr ) );
}
}
continue;
@ -3087,7 +3189,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}
catch(Error &e) {
//options->setOption() prints error message
TraceEvent(SevWarn, "CLISetOptionError").detail("Option", printable(tokens[2])).error(e);
TraceEvent(SevWarn, "CLISetOptionError").error(e).detail("Option", printable(tokens[2]));
is_error = true;
}
}
@ -3138,7 +3240,7 @@ ACTOR Future<int> runCli(CLIOptions opt) {
linenoise.historyLoad(historyFilename);
}
catch(Error &e) {
TraceEvent(SevWarnAlways, "ErrorLoadingCliHistory").detail("Filename", historyFilename.empty() ? "<unknown>" : historyFilename).error(e).GetLastError();
TraceEvent(SevWarnAlways, "ErrorLoadingCliHistory").error(e).detail("Filename", historyFilename.empty() ? "<unknown>" : historyFilename).GetLastError();
}
state int result = wait(cli(opt, &linenoise));
@ -3148,7 +3250,7 @@ ACTOR Future<int> runCli(CLIOptions opt) {
linenoise.historySave(historyFilename);
}
catch(Error &e) {
TraceEvent(SevWarnAlways, "ErrorSavingCliHistory").detail("Filename", historyFilename).error(e).GetLastError();
TraceEvent(SevWarnAlways, "ErrorSavingCliHistory").error(e).detail("Filename", historyFilename).GetLastError();
}
}
@ -3156,7 +3258,7 @@ ACTOR Future<int> runCli(CLIOptions opt) {
}
ACTOR Future<Void> timeExit(double duration) {
Void _ = wait(delay(duration));
wait(delay(duration));
fprintf(stderr, "Specified timeout reached -- exiting...\n");
return Void();
}

View File

@ -346,7 +346,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RangeResultWithVersi
//add lock
releaser.release();
Void _ = wait(lock->take(TaskDefaultYield, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT));
wait(lock->take(TaskDefaultYield, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT));
releaser = FlowLock::Releaser(*lock, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT);
state Standalone<RangeResultRef> values = wait(tr.getRange(begin, end, limits));
@ -357,7 +357,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RangeResultWithVersi
values.more = true;
// Half of the time wait for this tr to expire so that the next read is at a different version
if(g_random->random01() < 0.5)
Void _ = wait(delay(6.0));
wait(delay(6.0));
}
releaser.remaining -= values.expectedSize(); // it's the responsibility of the caller to release after this point
@ -411,13 +411,13 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
rangevalue.more = true;
// Half of the time wait for this tr to expire so that the next read is at a different version
if(g_random->random01() < 0.5)
Void _ = wait(delay(6.0));
wait(delay(6.0));
}
//add lock
Void _ = wait(active);
wait(active);
releaser.release();
Void _ = wait(lock->take(TaskDefaultYield, rangevalue.expectedSize() + rcGroup.items.expectedSize()));
wait(lock->take(TaskDefaultYield, rangevalue.expectedSize() + rcGroup.items.expectedSize()));
releaser = FlowLock::Releaser(*lock, rangevalue.expectedSize() + rcGroup.items.expectedSize());
int index(0);
@ -468,7 +468,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
catch (Error &e) {
if (e.code() != error_code_transaction_too_old && e.code() != error_code_future_version)
throw;
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
}
@ -531,7 +531,7 @@ ACTOR Future<int> dumpData(Database cx, PromiseStream<RCGroup> results, Referenc
req.flags = req.flags | CommitTransactionRequest::FLAG_IS_LOCK_AWARE;
totalBytes += mutationSize;
Void _ = wait( commitLock->take(TaskDefaultYield, mutationSize) );
wait( commitLock->take(TaskDefaultYield, mutationSize) );
addActor.send( commitLock->releaseWhen( success(commit.getReply(req)), mutationSize ) );
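// Flow-control sketch (a reading of the two lines above): each batch acquires
// mutationSize bytes of the commit lock before being sent, and releaseWhen
// returns them once the commit reply arrives, bounding the bytes in flight.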
if(endOfStream) {
@ -571,7 +571,7 @@ ACTOR Future<Void> coalesceKeyVersionCache(Key uid, Version endVersion, Referenc
req.transaction.read_snapshot = committedVersion->get();
req.flags = req.flags | CommitTransactionRequest::FLAG_IS_LOCK_AWARE;
Void _ = wait( commitLock->take(TaskDefaultYield, mutationSize) );
wait( commitLock->take(TaskDefaultYield, mutationSize) );
addActor.send( commitLock->releaseWhen( success(commit.getReply(req)), mutationSize ) );
}
@ -587,7 +587,7 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key remov
try {
loop {
if(beginVersion >= *endVersion) {
Void _ = wait( commitLock.take(TaskDefaultYield, CLIENT_KNOBS->BACKUP_LOCK_BYTES) );
wait( commitLock.take(TaskDefaultYield, CLIENT_KNOBS->BACKUP_LOCK_BYTES) );
commitLock.release(CLIENT_KNOBS->BACKUP_LOCK_BYTES);
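// Taking the lock's entire BACKUP_LOCK_BYTES capacity above acts as a barrier:
// the take cannot succeed until every outstanding commit has released its share,
// so the re-check of *endVersion below observes a quiesced applier.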
if(beginVersion >= *endVersion) {
return Void();
@ -615,7 +615,7 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key remov
if(error.isError()) throw error.getError();
}
Void _ = wait(coalesceKeyVersionCache(uid, newEndVersion, keyVersion, commit, committedVersion, addActor, &commitLock));
wait(coalesceKeyVersionCache(uid, newEndVersion, keyVersion, commit, committedVersion, addActor, &commitLock));
beginVersion = newEndVersion;
}
} catch( Error &e ) {
@ -715,7 +715,7 @@ ACTOR static Future<Void> _eraseLogData(Database cx, Key logUidValue, Key destUi
}
}
}
Void _ = wait(tr->commit());
wait(tr->commit());
if (!endVersion.present() && (backupVersions.size() == 1 || currEndVersion >= nextSmallestVersion)) {
return Void();
@ -725,7 +725,7 @@ ACTOR static Future<Void> _eraseLogData(Database cx, Key logUidValue, Key destUi
}
tr->reset();
} catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}

View File

@ -37,8 +37,8 @@ namespace IBackupFile_impl {
ACTOR Future<Void> appendStringRefWithLen(Reference<IBackupFile> file, Standalone<StringRef> s) {
state uint32_t lenBuf = bigEndian32((uint32_t)s.size());
Void _ = wait(file->append(&lenBuf, sizeof(lenBuf)));
Void _ = wait(file->append(s.begin(), s.size()));
wait(file->append(&lenBuf, sizeof(lenBuf)));
wait(file->append(s.begin(), s.size()));
return Void();
}
}
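A minimal sketch, assumed rather than taken from this patch, of reading back the length-prefixed format that appendStringRefWithLen writes; it relies on IAsyncFile::read(void*, int, int64_t) and on bigEndian32 being its own inverse:
ACTOR Future<Standalone<StringRef>> readStringRefWithLen(Reference<IAsyncFile> file, int64_t offset) {
	// Read the big-endian 32-bit length header written before the payload.
	state uint32_t lenBuf = 0;
	int lenRead = wait(file->read(&lenBuf, sizeof(lenBuf), offset));
	ASSERT(lenRead == sizeof(lenBuf));
	state int len = (int)bigEndian32(lenBuf);
	// Read exactly len payload bytes following the header.
	state Standalone<StringRef> s = makeString(len);
	int dataRead = wait(file->read(mutateString(s), len, offset + sizeof(lenBuf)));
	ASSERT(dataRead == len);
	return s;
}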
@ -329,7 +329,7 @@ public:
}
else
throw restore_unknown_file_type();
Void _ = wait(yield());
wait(yield());
}
state json_spirit::mValue json;
@ -340,12 +340,12 @@ public:
doc.create("beginVersion") = minVer;
doc.create("endVersion") = maxVer;
Void _ = wait(yield());
wait(yield());
state std::string docString = json_spirit::write_string(json);
state Reference<IBackupFile> f = wait(bc->writeFile(format("snapshots/snapshot,%lld,%lld,%lld", minVer, maxVer, totalBytes)));
Void _ = wait(f->append(docString.data(), docString.size()));
Void _ = wait(f->finish());
wait(f->append(docString.data(), docString.size()));
wait(f->finish());
return Void();
}
@ -430,7 +430,7 @@ public:
state Future<std::vector<RangeFile>> fRanges = bc->listRangeFiles(0, std::numeric_limits<Version>::max());
state Future<std::vector<KeyspaceSnapshotFile>> fSnapshots = bc->listKeyspaceSnapshots();
state Future<std::vector<LogFile>> fLogs = bc->listLogFiles(0, std::numeric_limits<Version>::max());
Void _ = wait(success(fRanges) && success(fSnapshots) && success(fLogs));
wait(success(fRanges) && success(fSnapshots) && success(fLogs));
return FullBackupListing({fRanges.get(), fLogs.get(), fSnapshots.get()});
}
@ -451,7 +451,7 @@ public:
state Optional<Version> end;
if(!deepScan) {
Void _ = wait(store(bc->logBeginVersion().get(), begin) && store(bc->logEndVersion().get(), end));
wait(store(bc->logBeginVersion().get(), begin) && store(bc->logEndVersion().get(), end));
}
// Use the known log range if present
@ -503,7 +503,7 @@ public:
updates = updates && bc->logBeginVersion().set(desc.minLogBegin.get());
if(desc.contiguousLogEnd.present() && (!end.present() || end.get() < desc.contiguousLogEnd.get()) )
updates = updates && bc->logEndVersion().set(desc.contiguousLogEnd.get());
Void _ = wait(updates);
wait(updates);
} catch(Error &e) {
if(e.code() == error_code_actor_cancelled)
throw;
@ -576,7 +576,7 @@ public:
state Optional<Version> expiredEnd;
state Optional<Version> logBegin;
state Optional<Version> logEnd;
Void _ = wait(store(bc->expiredEndVersion().get(), expiredEnd) && store(bc->logBeginVersion().get(), logBegin) && store(bc->logEndVersion().get(), logEnd));
wait(store(bc->expiredEndVersion().get(), expiredEnd) && store(bc->logBeginVersion().get(), logBegin) && store(bc->logEndVersion().get(), logEnd));
// Update scan range if expiredEnd is present
if(expiredEnd.present()) {
@ -646,9 +646,9 @@ public:
// If we're expiring the entire log range described by the metadata then clear both metadata values
if(logEnd.present() && logEnd.get() < expireEndVersion) {
if(logBegin.present())
Void _ = wait(bc->logBeginVersion().clear());
wait(bc->logBeginVersion().clear());
if(logEnd.present())
Void _ = wait(bc->logEndVersion().clear());
wait(bc->logEndVersion().clear());
}
else {
// If we are expiring to a point within the metadata range then update the begin if we have a new
@ -656,13 +656,13 @@ public:
// repairing the metadata from an incorrect state)
if(logBegin.present() && logBegin.get() < expireEndVersion) {
if(newLogBeginVersion.present()) {
Void _ = wait(bc->logBeginVersion().set(newLogBeginVersion.get()));
wait(bc->logBeginVersion().set(newLogBeginVersion.get()));
}
else {
if(logBegin.present())
Void _ = wait(bc->logBeginVersion().clear());
wait(bc->logBeginVersion().clear());
if(logEnd.present())
Void _ = wait(bc->logEndVersion().clear());
wait(bc->logEndVersion().clear());
}
}
}
@ -685,13 +685,13 @@ public:
state int targetFuturesSize = toDelete.empty() ? 0 : (CLIENT_KNOBS->BACKUP_CONCURRENT_DELETES - 1);
while(deleteFutures.size() > targetFuturesSize) {
Void _ = wait(deleteFutures.front());
wait(deleteFutures.front());
deleteFutures.pop_front();
}
}
// Update the expiredEndVersion property.
Void _ = wait(bc->expiredEndVersion().set(expireEndVersion));
wait(bc->expiredEndVersion().set(expireEndVersion));
return Void();
}
@ -789,12 +789,11 @@ public:
try {
state Reference<IBackupFile> f = wait(bc->writeFile(path));
std::string s = format("%lld", v);
Void _ = wait(f->append(s.data(), s.size()));
Void _ = wait(f->finish());
wait(f->append(s.data(), s.size()));
wait(f->finish());
return Void();
} catch(Error &e) {
if(e.code() != error_code_actor_cancelled)
TraceEvent(SevWarn, "BackupContainerWritePropertyFailed").detail("Path", path).error(e);
TraceEvent(SevWarn, "BackupContainerWritePropertyFailed").error(e).detail("Path", path);
throw;
}
}
@ -816,8 +815,7 @@ public:
} catch(Error &e) {
if(e.code() == error_code_file_not_found)
return Optional<Version>();
if(e.code() != error_code_actor_cancelled)
TraceEvent(SevWarn, "BackupContainerReadPropertyFailed").detail("Path", path).error(e);
TraceEvent(SevWarn, "BackupContainerReadPropertyFailed").error(e).detail("Path", path);
throw;
}
}
@ -917,8 +915,8 @@ public:
}
ACTOR static Future<Void> finish_impl(Reference<BackupFile> f) {
Void _ = wait(f->m_file->truncate(f->size())); // Some IAsyncFile implementations extend in whole block sizes.
Void _ = wait(f->m_file->sync());
wait(f->m_file->truncate(f->size())); // Some IAsyncFile implementations extend in whole block sizes.
wait(f->m_file->sync());
std::string name = f->m_file->getFilename();
f->m_file.clear();
renameFile(name, f->m_finalFullPath);
@ -1103,12 +1101,12 @@ public:
}
ACTOR static Future<Void> create_impl(Reference<BackupContainerBlobStore> bc) {
Void _ = wait(bc->m_bstore->createBucket(BUCKET));
wait(bc->m_bstore->createBucket(BUCKET));
// Check/create the index entry
bool exists = wait(bc->m_bstore->objectExists(BUCKET, bc->indexEntry()));
if(!exists) {
Void _ = wait(bc->m_bstore->writeEntireFile(BUCKET, bc->indexEntry(), ""));
wait(bc->m_bstore->writeEntireFile(BUCKET, bc->indexEntry(), ""));
}
return Void();
@ -1120,10 +1118,10 @@ public:
ACTOR static Future<Void> deleteContainer_impl(Reference<BackupContainerBlobStore> bc, int *pNumDeleted) {
// First delete everything under the data prefix in the bucket
Void _ = wait(bc->m_bstore->deleteRecursively(BUCKET, bc->dataPath(""), pNumDeleted));
wait(bc->m_bstore->deleteRecursively(BUCKET, bc->dataPath(""), pNumDeleted));
// Now that all files are deleted, delete the index entry
Void _ = wait(bc->m_bstore->deleteObject(BUCKET, bc->indexEntry()));
wait(bc->m_bstore->deleteObject(BUCKET, bc->indexEntry()));
return Void();
}
@ -1253,11 +1251,12 @@ ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Databa
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state std::vector<std::pair<int64_t, Version>> results = wait( versionMap.getRange(tr, 0, time, 1, false, true) );
if (results.size() != 1) {
// No key less than time was found in the database
// Look for a key >= time.
Void _ = wait( store( versionMap.getRange(tr, time, std::numeric_limits<int64_t>::max(), 1), results) );
wait( store( versionMap.getRange(tr, time, std::numeric_limits<int64_t>::max(), 1), results) );
if(results.size() != 1) {
fprintf(stderr, "ERROR: Unable to calculate a version for given date/time.\n");
@ -1270,7 +1269,7 @@ ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Databa
return std::max<Version>(0, result.second + (time - result.first) * CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
} catch (Error& e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
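// Worked example for the extrapolation above (illustrative, assuming
// CLIENT_KNOBS->CORE_VERSIONSPERSECOND is 1,000,000, its customary value): if the
// version map yields the pair (t0, v0) and the requested time is t0 + 10 seconds,
// the returned version is v0 + 10,000,000.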
@ -1297,7 +1296,7 @@ ACTOR Future<Optional<int64_t>> timeKeeperEpochsFromVersion(Version v, Reference
if(mid == min) {
// There aren't any records having a version < v, so just look for any record having a time < now
// and base a result on it
Void _ = wait(store(versionMap.getRange(tr, 0, (int64_t)now(), 1), results));
wait(store(versionMap.getRange(tr, 0, (int64_t)now(), 1), results));
if (results.size() != 1) {
// There aren't any timekeeper records to base a result on so return nothing
@ -1335,9 +1334,9 @@ ACTOR Future<Void> writeAndVerifyFile(Reference<IBackupContainer> c, Reference<I
for(int i = 0; i < content.size(); ++i)
mutateString(content)[i] = (uint8_t)g_random->randomInt(0, 256);
Void _ = wait(f->append(content.begin(), content.size()));
wait(f->append(content.begin(), content.size()));
}
Void _ = wait(f->finish());
wait(f->finish());
state Reference<IAsyncFile> inputFile = wait(c->readFile(f->getFileName()));
int64_t fileSize = wait(inputFile->size());
ASSERT(size == fileSize);
@ -1357,13 +1356,13 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
// Make sure container doesn't exist, then create it.
try {
Void _ = wait(c->deleteContainer());
wait(c->deleteContainer());
} catch(Error &e) {
if(e.code() != error_code_backup_invalid_url)
throw;
}
Void _ = wait(c->create());
wait(c->create());
state int64_t versionShift = g_random->randomInt64(0, std::numeric_limits<Version>::max() - 500);
@ -1373,7 +1372,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
state Reference<IBackupFile> range2 = wait(c->writeRangeFile(300 + versionShift, 10));
state Reference<IBackupFile> range3 = wait(c->writeRangeFile(310 + versionShift, 10));
Void _ = wait(
wait(
writeAndVerifyFile(c, log1, 0)
&& writeAndVerifyFile(c, log2, g_random->randomInt(0, 10000000))
&& writeAndVerifyFile(c, range1, g_random->randomInt(0, 1000))
@ -1381,7 +1380,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
&& writeAndVerifyFile(c, range3, g_random->randomInt(0, 3000000))
);
Void _ = wait(
wait(
c->writeKeyspaceSnapshotFile({range1->getFileName(), range2->getFileName()}, range1->size() + range2->size())
&& c->writeKeyspaceSnapshotFile({range3->getFileName()}, range3->size())
);
@ -1410,21 +1409,21 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
ASSERT(rest.get().ranges.size() == 2);
printf("Expire 1\n");
Void _ = wait(c->expireData(100 + versionShift));
wait(c->expireData(100 + versionShift));
BackupDescription d = wait(c->describeBackup());
printf("Backup Description 2\n%s", d.toString().c_str());
ASSERT(d.minLogBegin == 100 + versionShift);
ASSERT(d.maxRestorableVersion == desc.maxRestorableVersion);
printf("Expire 2\n");
Void _ = wait(c->expireData(101 + versionShift));
wait(c->expireData(101 + versionShift));
BackupDescription d = wait(c->describeBackup());
printf("Backup Description 3\n%s", d.toString().c_str());
ASSERT(d.minLogBegin == 100 + versionShift);
ASSERT(d.maxRestorableVersion == desc.maxRestorableVersion);
printf("Expire 3\n");
Void _ = wait(c->expireData(300 + versionShift));
wait(c->expireData(300 + versionShift));
BackupDescription d = wait(c->describeBackup());
printf("Backup Description 4\n%s", d.toString().c_str());
ASSERT(d.minLogBegin.present());
@ -1432,13 +1431,13 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
ASSERT(d.maxRestorableVersion == desc.maxRestorableVersion);
printf("Expire 4\n");
Void _ = wait(c->expireData(301 + versionShift, true));
wait(c->expireData(301 + versionShift, true));
BackupDescription d = wait(c->describeBackup());
printf("Backup Description 4\n%s", d.toString().c_str());
ASSERT(d.snapshots.size() == 1);
ASSERT(!d.minLogBegin.present());
Void _ = wait(c->deleteContainer());
wait(c->deleteContainer());
BackupDescription d = wait(c->describeBackup());
printf("Backup Description 5\n%s", d.toString().c_str());
@ -1452,9 +1451,9 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
TEST_CASE("backup/containers/localdir") {
if(g_network->isSimulated())
Void _ = wait(testBackupContainer(format("file://simfdb/backups/%llx", timer_int())));
wait(testBackupContainer(format("file://simfdb/backups/%llx", timer_int())));
else
Void _ = wait(testBackupContainer(format("file:///private/tmp/fdb_backups/%llx", timer_int())));
wait(testBackupContainer(format("file:///private/tmp/fdb_backups/%llx", timer_int())));
return Void();
};
@ -1462,7 +1461,7 @@ TEST_CASE("backup/containers/url") {
if (!g_network->isSimulated()) {
const char *url = getenv("FDB_TEST_BACKUP_URL");
ASSERT(url != nullptr);
Void _ = wait(testBackupContainer(url));
wait(testBackupContainer(url));
}
return Void();
};

23
fdbclient/ClusterInterface.h Normal file → Executable file
View File

@ -117,7 +117,7 @@ struct OpenDatabaseRequest {
// info changes. Returns immediately if the current client info id is different from
// knownClientInfoID; otherwise returns when it next changes (or perhaps after a long interval)
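// Long-poll sketch (illustrative, not the actual client code):
//   loop {
//       req.knownClientInfoID = clientInfo.id;
//       ClientDBInfo next = wait(clusterInterface.openDatabase.getReply(req));
//       clientInfo = next; // the reply arrives early only when the id differs
//   }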
Arena arena;
StringRef dbName, issues, traceLogGroup;
StringRef issues, traceLogGroup;
VectorRef<ClientVersionRef> supportedVersions;
UID knownClientInfoID;
ReplyPromise< struct ClientDBInfo > reply;
@ -125,7 +125,7 @@ struct OpenDatabaseRequest {
template <class Ar>
void serialize(Ar& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A400040001LL );
ar & dbName & issues & supportedVersions & traceLogGroup & knownClientInfoID & reply & arena;
ar & issues & supportedVersions & traceLogGroup & knownClientInfoID & reply & arena;
}
};
@ -145,7 +145,7 @@ struct SystemFailureStatus {
struct FailureMonitoringRequest {
// Sent by all participants to the cluster controller reply.clientRequestIntervalMS
// ms after receiving the previous reply.
// Provides the controller the self-diagnosed status of the sender, and also
// Provides the controller the self-diagnosed status of the sender, and also
// requests the status of other systems. Failure to timely send one of these implies
// a failed status.
// If !senderStatus.present(), the sender wants to receive the latest failure information
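// Heartbeat sketch (illustrative; applyFailureInfo is hypothetical):
//   loop {
//       FailureMonitoringReply reply = wait(controller.failureMonitoring.getReply(req));
//       applyFailureInfo(reply);
//       wait(delay(reply.clientRequestIntervalMS / 1000.0));
//   }
// A participant that stops sending these in time is itself considered failed.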
@ -188,13 +188,26 @@ struct StatusRequest {
struct StatusReply {
StatusObject statusObj;
std::string statusStr;
StatusReply() {}
StatusReply( StatusObject statusObj ) : statusObj(statusObj) {}
explicit StatusReply(StatusObject obj) : statusObj(obj), statusStr(json_spirit::write_string(json_spirit::mValue(obj))) {}
explicit StatusReply(std::string &&text) : statusStr(text) {}
template <class Ar>
void serialize(Ar& ar) {
ar & statusObj;
ar & statusStr;
if( ar.isDeserializing ) {
json_spirit::mValue mv;
if(g_network->isSimulated()) {
mv = readJSONStrictly(statusStr);
}
else {
// In non-simulation allow errors because some status data is better than no status data
json_spirit::read_string( statusStr, mv );
}
statusObj = std::move(mv.get_obj());
}
}
};

View File

@ -110,22 +110,14 @@ struct LeaderInfo {
// All but the first 7 bits are used to represent process id
bool equalInternalId(LeaderInfo const& leaderInfo) const {
if ( (changeID.first() & mask) == (leaderInfo.changeID.first() & mask) && changeID.second() == leaderInfo.changeID.second() ) {
return true;
} else {
return false;
}
return ((changeID.first() & mask) == (leaderInfo.changeID.first() & mask)) && changeID.second() == leaderInfo.changeID.second();
}
// Change leader only if
// Change leader only if
// 1. the candidate has better process class fitness and the candidate is not the leader
// 2. the leader process class fitness become worse
// 2. the leader process class fitness becomes worse
bool leaderChangeRequired(LeaderInfo const& candidate) const {
if ( ((changeID.first() & ~mask) > (candidate.changeID.first() & ~mask) && !equalInternalId(candidate)) || ((changeID.first() & ~mask) < (candidate.changeID.first() & ~mask) && equalInternalId(candidate)) ) {
return true;
} else {
return false;
}
return ((changeID.first() & ~mask) > (candidate.changeID.first() & ~mask) && !equalInternalId(candidate)) || ((changeID.first() & ~mask) < (candidate.changeID.first() & ~mask) && equalInternalId(candidate));
}
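// Reading the bit arithmetic (illustrative): the first 7 bits of changeID.first()
// carry the process-class fitness and the remaining bits the process id, so
// (changeID.first() & mask) compares ids while (changeID.first() & ~mask) compares
// fitness, where a numerically smaller value is better. A change is required when a
// different process reports strictly better fitness, or when this process's own
// fitness has degraded.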
template <class Ar>

View File

@ -110,7 +110,7 @@ namespace dbBackup {
uint32_t taskVersion = task->getVersion();
if (taskVersion > version) {
TraceEvent(SevError, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
Void _ = wait(logError(tr, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]),
wait(logError(tr, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]),
format("ERROR: %s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version)));
throw task_invalid_version();
@ -162,7 +162,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -170,7 +170,7 @@ namespace dbBackup {
state Reference<FlowLock> lock(new FlowLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES));
state Subspace conf = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(cx, task, BackupRangeTaskFunc::name, BackupRangeTaskFunc::version));
wait(checkTaskVersion(cx, task, BackupRangeTaskFunc::name, BackupRangeTaskFunc::version));
// Find out if there is a shard boundary in (beginKey, endKey)
Standalone<VectorRef<KeyRef>> keys = wait(runRYWTransaction(taskBucket->src, [=](Reference<ReadYourWritesTransaction> tr){ return getBlockOfShards(tr, task->params[DatabaseBackupAgent::keyBeginKey], task->params[DatabaseBackupAgent::keyEndKey], CLIENT_KNOBS->BACKUP_SHARD_TASK_LIMIT); }));
if (keys.size() > 0) {
@ -239,7 +239,7 @@ namespace dbBackup {
return Void();
}
Void _ = wait(logError(cx, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: %s", err.what())));
wait(logError(cx, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: %s", err.what())));
throw err;
}
@ -270,8 +270,8 @@ namespace dbBackup {
state Future<Standalone<RangeResultRef>> nextRange = tr->getRange(firstGreaterOrEqual(rangeEnd.withPrefix(prefix)), firstGreaterOrEqual(strinc(prefix)), 1, true, false);
state Future<Void> verified = taskBucket->keepRunning(tr, task);
Void _ = wait( checkDatabaseLock(tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())) );
Void _ = wait( success(backupVersions) && success(logVersionValue) && success(rangeCountValue) && success(prevRange) && success(nextRange) && success(verified) );
wait( checkDatabaseLock(tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())) );
wait( success(backupVersions) && success(logVersionValue) && success(rangeCountValue) && success(prevRange) && success(nextRange) && success(verified) );
int64_t rangeCount = 0;
if(rangeCountValue.get().present()) {
@ -288,7 +288,7 @@ namespace dbBackup {
if(rangeCount > CLIENT_KNOBS->BACKUP_MAP_KEY_UPPER_LIMIT)
TraceEvent(SevWarnAlways, "DBA_KeyRangeMapTooLarge");
Void _ = wait( delay(1) );
wait( delay(1) );
task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey] = rangeBegin;
return Void();
}
@ -343,9 +343,9 @@ namespace dbBackup {
versionLoc++;
}
Void _ = wait(waitForAll(setRanges));
wait(waitForAll(setRanges));
Void _ = wait(tr->commit());
wait(tr->commit());
Params.bytesWritten().set(task, Params.bytesWritten().getOrDefault(task) + bytesSet);
//TraceEvent("DBA_SetComplete", debugID).detail("Ver", values.second).detail("LogVersion", logVersion).detail("ReadVersion", readVer).detail("CommitVer", tr.getCommittedVersion()).detail("Range", printable(versionRange));
@ -358,7 +358,7 @@ namespace dbBackup {
}
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
valueLoc = committedValueLoc;
}
}
@ -384,7 +384,7 @@ namespace dbBackup {
addTaskVector.push_back(addTask(tr, taskBucket, task, nextKey, task->params[BackupAgentBase::keyEndKey], TaskCompletionKey::joinWith(onDone)));
}
Void _ = wait(waitForAll(addTaskVector));
wait(waitForAll(addTaskVector));
return Void();
}
@ -398,14 +398,14 @@ namespace dbBackup {
config.rangeBytesWritten().atomicOp(tr, bytesWritten, MutationRef::AddValue);
if (task->params.find(BackupRangeTaskFunc::keyAddBackupRangeTasks) != task->params.end()) {
Void _ = wait(startBackupRangeInternal(tr, BinaryReader::fromStringRef<Standalone<VectorRef<KeyRef>>>(task->params[BackupRangeTaskFunc::keyAddBackupRangeTasks], IncludeVersion()), taskBucket, futureBucket, task, taskFuture) && taskBucket->finish(tr, task));
wait(startBackupRangeInternal(tr, BinaryReader::fromStringRef<Standalone<VectorRef<KeyRef>>>(task->params[BackupRangeTaskFunc::keyAddBackupRangeTasks], IncludeVersion()), taskBucket, futureBucket, task, taskFuture) && taskBucket->finish(tr, task));
}
else if (task->params.find(BackupRangeTaskFunc::keyBackupRangeBeginKey) != task->params.end() && task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey] < task->params[BackupAgentBase::keyEndKey]) {
ASSERT(taskFuture->key.size() > 0);
Void _ = wait(success(BackupRangeTaskFunc::addTask(tr, taskBucket, task, task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey], task->params[BackupAgentBase::keyEndKey], TaskCompletionKey::signal(taskFuture->key))) && taskBucket->finish(tr, task));
wait(success(BackupRangeTaskFunc::addTask(tr, taskBucket, task, task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey], task->params[BackupAgentBase::keyEndKey], TaskCompletionKey::signal(taskFuture->key))) && taskBucket->finish(tr, task));
}
else {
Void _ = wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
}
return Void();
@ -424,7 +424,7 @@ namespace dbBackup {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(tr, task, FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version));
wait(checkTaskVersion(tr, task, FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version));
// Enable the stop key
Transaction srcTr(taskBucket->src);
@ -432,7 +432,7 @@ namespace dbBackup {
Version readVersion = wait(srcTr.getReadVersion());
tr->set(states.pack(DatabaseBackupAgent::keyCopyStop), BinaryWriter::toValue(readVersion, Unversioned()));
TraceEvent("DBA_FinishFullBackup").detail("CopyStop", readVersion);
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -448,7 +448,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -474,11 +474,11 @@ namespace dbBackup {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state FlowLock lock(CLIENT_KNOBS->BACKUP_LOCK_BYTES);
Void _ = wait(checkTaskVersion(cx, task, EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version));
wait(checkTaskVersion(cx, task, EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version));
Version endVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyEndVersion], Unversioned());
Void _ = wait(eraseLogData(taskBucket->src, task->params[BackupAgentBase::keyConfigLogUid], task->params[BackupAgentBase::destUid], Optional<Version>(endVersion), true, BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned())));
wait(eraseLogData(taskBucket->src, task->params[BackupAgentBase::keyConfigLogUid], task->params[BackupAgentBase::destUid], Optional<Version>(endVersion), true, BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned())));
return Void();
}
@ -496,7 +496,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -504,7 +504,7 @@ namespace dbBackup {
state Reference<TaskFuture> taskFuture = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
Void _ = wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
return Void();
}
};
@ -579,7 +579,7 @@ namespace dbBackup {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.options.customTransactionSizeLimit = 2 * CLIENT_KNOBS->TRANSACTION_SIZE_LIMIT;
Void _ = wait(checkDatabaseLock(&tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())));
wait(checkDatabaseLock(&tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())));
state int64_t bytesSet = 0;
bool first = true;
@ -594,12 +594,12 @@ namespace dbBackup {
}
}
Void _ = wait(tr.commit());
wait(tr.commit());
Params.bytesWritten().set(task, Params.bytesWritten().getOrDefault(task) + bytesSet);
break;
}
catch (Error &e) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
}
@ -608,7 +608,7 @@ namespace dbBackup {
throw e;
state Error err = e;
Void _ = wait(logError(cx, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: Failed to dump mutations because of error %s", err.what())));
wait(logError(cx, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: Failed to dump mutations because of error %s", err.what())));
throw err;
}
@ -618,7 +618,7 @@ namespace dbBackup {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Reference<FlowLock> lock(new FlowLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES));
Void _ = wait(checkTaskVersion(cx, task, CopyLogRangeTaskFunc::name, CopyLogRangeTaskFunc::version));
wait(checkTaskVersion(cx, task, CopyLogRangeTaskFunc::name, CopyLogRangeTaskFunc::version));
state Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
state Version endVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyEndVersion], Unversioned());
@ -635,7 +635,7 @@ namespace dbBackup {
dump.push_back(dumpData(cx, task, results[i], lock.getPtr(), taskBucket));
}
Void _ = wait(waitForAll(dump));
wait(waitForAll(dump));
if (newEndVersion < endVersion) {
task->params[CopyLogRangeTaskFunc::keyNextBeginVersion] = BinaryWriter::toValue(newEndVersion, Unversioned());
@ -657,7 +657,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -674,11 +674,11 @@ namespace dbBackup {
if (task->params.find(CopyLogRangeTaskFunc::keyNextBeginVersion) != task->params.end()) {
state Version nextVersion = BinaryReader::fromStringRef<Version>(task->params[CopyLogRangeTaskFunc::keyNextBeginVersion], Unversioned());
Void _ = wait(success(CopyLogRangeTaskFunc::addTask(tr, taskBucket, task, nextVersion, endVersion, TaskCompletionKey::signal(taskFuture->key))) &&
wait(success(CopyLogRangeTaskFunc::addTask(tr, taskBucket, task, nextVersion, endVersion, TaskCompletionKey::signal(taskFuture->key))) &&
taskBucket->finish(tr, task));
}
else {
Void _ = wait(taskFuture->set(tr, taskBucket) &&
wait(taskFuture->set(tr, taskBucket) &&
taskBucket->finish(tr, task));
}
@ -697,7 +697,7 @@ namespace dbBackup {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace conf = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[BackupAgentBase::keyConfigLogUid]);
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(tr, task, CopyLogsTaskFunc::name, CopyLogsTaskFunc::version));
wait(checkTaskVersion(tr, task, CopyLogsTaskFunc::name, CopyLogsTaskFunc::version));
state Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
state Version prevBeginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyPrevBeginVersion], Unversioned());
@ -711,9 +711,9 @@ namespace dbBackup {
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
if (endVersion <= beginVersion) {
Void _ = wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(CopyLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone)));
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -751,16 +751,16 @@ namespace dbBackup {
addTaskVector.push_back(EraseLogRangeTaskFunc::addTask(tr, taskBucket, task, beginVersion, TaskCompletionKey::joinWith(allPartsDone)));
}
Void _ = wait(waitForAll(addTaskVector) && taskBucket->finish(tr, task));
wait(waitForAll(addTaskVector) && taskBucket->finish(tr, task));
} else {
if(appliedVersion <= stopVersionData) {
Void _ = wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(CopyLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone)));
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
Void _ = wait(onDone->set(tr, taskBucket) && taskBucket->finish(tr, task));
wait(onDone->set(tr, taskBucket) && taskBucket->finish(tr, task));
tr->set(states.pack(DatabaseBackupAgent::keyStateStop), BinaryWriter::toValue(beginVersion, Unversioned()));
}
@ -779,7 +779,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -802,7 +802,7 @@ namespace dbBackup {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(cx, task, FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version));
wait(checkTaskVersion(cx, task, FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version));
state Transaction tr2(cx);
loop {
@ -815,13 +815,13 @@ namespace dbBackup {
//TraceEvent("DBA_FinishedFullBackup").detail("Applied", appliedVersion).detail("EndVer", endVersion);
if(appliedVersion < endVersion) {
Void _ = wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
task->params[FinishedFullBackupTaskFunc::keyInsertTask] = StringRef();
return Void();
}
break;
} catch( Error &e ) {
Void _ = wait(tr2.onError(e));
wait(tr2.onError(e));
}
}
@ -850,12 +850,12 @@ namespace dbBackup {
endVersion = tr->getReadVersion().get();
break;
} catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
Version backupUid = BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned());
Void _ = wait(eraseLogData(taskBucket->src, logUidValue, destUidValue, Optional<Version>(), true, backupUid));
wait(eraseLogData(taskBucket->src, logUidValue, destUidValue, Optional<Version>(), true, backupUid));
return Void();
}
@ -870,7 +870,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -881,7 +881,7 @@ namespace dbBackup {
if (task->params.find(FinishedFullBackupTaskFunc::keyInsertTask) != task->params.end()) {
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
Key _ = wait(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone)));
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -893,7 +893,7 @@ namespace dbBackup {
tr->clear(conf.range());
tr->set(states.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_COMPLETED)));
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -912,7 +912,7 @@ namespace dbBackup {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace conf = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[BackupAgentBase::keyConfigLogUid]);
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(tr, task, CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version));
wait(checkTaskVersion(tr, task, CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version));
state Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
state Version prevBeginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyPrevBeginVersion], Unversioned());
@ -925,9 +925,9 @@ namespace dbBackup {
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
if (endVersion <= beginVersion) {
Void _ = wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone)));
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -954,9 +954,9 @@ namespace dbBackup {
addTaskVector.push_back(EraseLogRangeTaskFunc::addTask(tr, taskBucket, task, beginVersion, TaskCompletionKey::joinWith(allPartsDone)));
}
Void _ = wait(waitForAll(addTaskVector) && taskBucket->finish(tr, task));
wait(waitForAll(addTaskVector) && taskBucket->finish(tr, task));
} else {
Void _ = wait(onDone->set(tr, taskBucket) && taskBucket->finish(tr, task));
wait(onDone->set(tr, taskBucket) && taskBucket->finish(tr, task));
}
return Void();
}
@ -974,7 +974,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -994,7 +994,7 @@ namespace dbBackup {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Reference<TaskFuture> taskFuture = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
Void _ = wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
return Void();
}
@ -1076,7 +1076,7 @@ namespace dbBackup {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.options.customTransactionSizeLimit = 2 * CLIENT_KNOBS->TRANSACTION_SIZE_LIMIT;
Void _ = wait(checkDatabaseLock(&tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())));
wait(checkDatabaseLock(&tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())));
state int64_t bytesSet = 0;
bool first = true;
@ -1091,12 +1091,12 @@ namespace dbBackup {
}
}
Void _ = wait(tr.commit());
wait(tr.commit());
Params.bytesWritten().set(task, Params.bytesWritten().getOrDefault(task) + bytesSet);
break;
}
catch (Error &e) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
}
@ -1105,7 +1105,7 @@ namespace dbBackup {
throw e;
state Error err = e;
Void _ = wait(logError(cx, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: Failed to dump mutations because of error %s", err.what())));
wait(logError(cx, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: Failed to dump mutations because of error %s", err.what())));
throw err;
}
@ -1115,7 +1115,7 @@ namespace dbBackup {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Reference<FlowLock> lock(new FlowLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES));
Void _ = wait(checkTaskVersion(cx, task, OldCopyLogRangeTaskFunc::name, OldCopyLogRangeTaskFunc::version));
wait(checkTaskVersion(cx, task, OldCopyLogRangeTaskFunc::name, OldCopyLogRangeTaskFunc::version));
state Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
state Version endVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyEndVersion], Unversioned());
@ -1132,7 +1132,7 @@ namespace dbBackup {
dump.push_back(dumpData(cx, task, results[i], lock.getPtr(), taskBucket));
}
Void _ = wait(waitForAll(dump));
wait(waitForAll(dump));
if (newEndVersion < endVersion) {
task->params[OldCopyLogRangeTaskFunc::keyNextBeginVersion] = BinaryWriter::toValue(newEndVersion, Unversioned());
@ -1154,7 +1154,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -1171,10 +1171,10 @@ namespace dbBackup {
if (task->params.find(OldCopyLogRangeTaskFunc::keyNextBeginVersion) != task->params.end()) {
state Version nextVersion = BinaryReader::fromStringRef<Version>(task->params[OldCopyLogRangeTaskFunc::keyNextBeginVersion], Unversioned());
Void _ = wait(success(OldCopyLogRangeTaskFunc::addTask(tr, taskBucket, task, nextVersion, endVersion, TaskCompletionKey::signal(taskFuture->key))) && taskBucket->finish(tr, task));
wait(success(OldCopyLogRangeTaskFunc::addTask(tr, taskBucket, task, nextVersion, endVersion, TaskCompletionKey::signal(taskFuture->key))) && taskBucket->finish(tr, task));
}
else {
Void _ = wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
}
return Void();
@ -1207,18 +1207,18 @@ namespace dbBackup {
tagNameKey = tagName.get();
break;
} catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
TraceEvent("DBA_AbortOldBackup").detail("TagName", tagNameKey.printable());
Void _ = wait(srcDrAgent.abortBackup(cx, tagNameKey, false, true));
wait(srcDrAgent.abortBackup(cx, tagNameKey, false, true));
return Void();
}
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -1232,7 +1232,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -1260,7 +1260,7 @@ namespace dbBackup {
state Key logUidValue = task->params[DatabaseBackupAgent::keyConfigLogUid];
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(logUidValue);
state Subspace config = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(logUidValue);
Void _ = wait(checkTaskVersion(cx, task, CopyDiffLogsUpgradeTaskFunc::name, CopyDiffLogsUpgradeTaskFunc::version));
wait(checkTaskVersion(cx, task, CopyDiffLogsUpgradeTaskFunc::name, CopyDiffLogsUpgradeTaskFunc::version));
// Retrieve backupRanges
state Standalone<VectorRef<KeyRangeRef>> backupRanges;
@ -1270,7 +1270,7 @@ namespace dbBackup {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
Future<Void> verified = taskBucket->keepRunning(tr, task);
Void _ = wait(verified);
wait(verified);
Optional<Key> backupKeysPacked = wait(tr->get(config.pack(BackupAgentBase::keyConfigBackupRanges)));
if (!backupKeysPacked.present()) {
@ -1281,7 +1281,7 @@ namespace dbBackup {
br >> backupRanges;
break;
} catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -1316,10 +1316,10 @@ namespace dbBackup {
Key versionKey = logUidValue.withPrefix(destUidValue).withPrefix(backupLatestVersionsPrefix);
srcTr->set(versionKey, task->params[DatabaseBackupAgent::keyBeginVersion]);
Void _ = wait(srcTr->commit());
wait(srcTr->commit());
break;
} catch(Error &e) {
Void _ = wait(srcTr->onError(e));
wait(srcTr->onError(e));
}
}
@ -1331,7 +1331,7 @@ namespace dbBackup {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
Void _ = wait(checkTaskVersion(tr, task, CopyDiffLogsUpgradeTaskFunc::name, CopyDiffLogsUpgradeTaskFunc::version));
wait(checkTaskVersion(tr, task, CopyDiffLogsUpgradeTaskFunc::name, CopyDiffLogsUpgradeTaskFunc::version));
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
if (task->params[BackupAgentBase::destUid].size() == 0) {
@ -1345,7 +1345,7 @@ namespace dbBackup {
Key _ = wait(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, 0, beginVersion, TaskCompletionKey::signal(onDone)));
}
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -1364,7 +1364,7 @@ namespace dbBackup {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(cx, task, BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version));
wait(checkTaskVersion(cx, task, BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version));
state Transaction tr(taskBucket->src);
loop{
try {
@ -1380,11 +1380,11 @@ namespace dbBackup {
task->params[DatabaseBackupAgent::keyPrevBeginVersion] = prevBeginVersion.get();
Void _ = wait(tr.commit());
wait(tr.commit());
return Void();
}
catch (Error &e) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
}
@ -1392,7 +1392,7 @@ namespace dbBackup {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace conf = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[BackupAgentBase::keyConfigLogUid]);
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
Void _ = wait(checkTaskVersion(tr, task, BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version));
wait(checkTaskVersion(tr, task, BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version));
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
@ -1423,7 +1423,7 @@ namespace dbBackup {
Key _ = wait(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), allPartsDone));
}
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -1437,7 +1437,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -1457,7 +1457,7 @@ namespace dbBackup {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Key logUidValue = task->params[DatabaseBackupAgent::keyConfigLogUid];
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(logUidValue);
Void _ = wait(checkTaskVersion(cx, task, StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version));
wait(checkTaskVersion(cx, task, StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version));
state Key destUidValue(logUidValue);
state Standalone<VectorRef<KeyRangeRef>> backupRanges = BinaryReader::fromStringRef<Standalone<VectorRef<KeyRangeRef>>>(task->params[DatabaseBackupAgent::keyConfigBackupRanges], IncludeVersion());
@ -1486,10 +1486,10 @@ namespace dbBackup {
task->params[BackupAgentBase::destUid] = destUidValue;
Void _ = wait(srcTr->commit());
wait(srcTr->commit());
break;
} catch(Error &e) {
Void _ = wait(srcTr->onError(e));
wait(srcTr->onError(e));
}
}
@ -1499,7 +1499,7 @@ namespace dbBackup {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
state Future<Void> verified = taskBucket->keepRunning(tr, task);
Void _ = wait(verified);
wait(verified);
// Set destUid at destination side
state Subspace config = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(logUidValue);
@ -1515,10 +1515,10 @@ namespace dbBackup {
task->params[BackupAgentBase::keyBeginVersion] = beginVersionKey;
Void _ = wait(tr->commit());
wait(tr->commit());
break;
} catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -1546,10 +1546,10 @@ namespace dbBackup {
srcTr2->set(logRangesEncodeKey(backupRange.begin, BinaryReader::fromStringRef<UID>(destUidValue, Unversioned())), logRangesEncodeValue(backupRange.end, destPath));
}
Void _ = wait(srcTr2->commit());
wait(srcTr2->commit());
break;
} catch (Error &e) {
Void _ = wait(srcTr2->onError(e));
wait(srcTr2->onError(e));
}
}
@ -1590,7 +1590,7 @@ namespace dbBackup {
// After the Backup completes, clear the backup subspace and update the status
Key _ = wait(BackupRestorableTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), kvBackupComplete));
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -1611,7 +1611,7 @@ namespace dbBackup {
return taskBucket->addTask(tr, task, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(logUid).pack(BackupAgentBase::keyFolderId), task->params[BackupAgentBase::keyFolderId]);
}
Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(logUid).pack(BackupAgentBase::keyFolderId), task->params[BackupAgentBase::keyFolderId]));
wait(waitFor->onSetAddTask(tr, taskBucket, task, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(logUid).pack(BackupAgentBase::keyFolderId), task->params[BackupAgentBase::keyFolderId]));
return LiteralStringRef("OnSetAddTask");
}
@ -1650,11 +1650,11 @@ public:
}
state Future<Void> watchDrVersionFuture = tr->watch(drVersionKey);
Void _ = wait(tr->commit());
Void _ = wait(watchDrVersionFuture);
wait(tr->commit());
wait(watchDrVersionFuture);
break;
} catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
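// Note on the commit-then-wait pattern above: a watch registered in a
// transaction only becomes active once that transaction commits, hence the
// fixed ordering of wait(tr->commit()) before wait(watchDrVersionFuture),
// with onError() retrying both on failure.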
@ -1685,11 +1685,11 @@ public:
}
state Future<Void> watchFuture = tr->watch(statusKey);
Void _ = wait(tr->commit());
Void _ = wait(watchFuture);
wait(tr->commit());
wait(watchFuture);
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -1713,11 +1713,11 @@ public:
}
state Future<Void> watchFuture = tr->watch(statusKey);
Void _ = wait(tr->commit());
Void _ = wait(watchFuture);
wait(tr->commit());
wait(watchFuture);
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -1768,7 +1768,7 @@ public:
for (auto& backupRange : backupRanges) {
backupIntoResults.push_back(tr->getRange(backupRange.removePrefix(removePrefix).withPrefix(addPrefix), 1));
}
Void _ = wait(waitForAll(backupIntoResults));
wait(waitForAll(backupIntoResults));
for (auto result : backupIntoResults) {
if (result.get().size() > 0) {
// One of the ranges we will be backing up into has pre-existing data.
@ -1820,9 +1820,9 @@ public:
addPrefix, removePrefix, BinaryWriter::toValue(backupRanges, IncludeVersion()), tagName, TaskCompletionKey::noSignal(), Reference<TaskFuture>(), databasesInSync));
if (lockDB)
Void _ = wait(lockDatabase(tr, logUid));
wait(lockDatabase(tr, logUid));
else
Void _ = wait(checkDatabaseLock(tr, logUid));
wait(checkDatabaseLock(tr, logUid));
TraceEvent("DBA_Submit").detail("LogUid", logUid).detail("Lock", lockDB).detail("LogUID", printable(logUidValue)).detail("Tag", printable(tagName))
.detail("Key", printable(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId))).detail("MapPrefix", printable(mapPrefix));
@ -1832,7 +1832,7 @@ public:
ACTOR static Future<Void> unlockBackup(DatabaseBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName) {
UID logUid = wait(backupAgent->getLogUid(tr, tagName));
Void _ = wait(unlockDatabase(tr, logUid));
wait(unlockDatabase(tr, logUid));
TraceEvent("DBA_Unlock").detail("Tag", printable(tagName));
return Void();
}
@ -1861,13 +1861,13 @@ public:
state Version commitVersion;
loop {
try {
Void _ = wait( lockDatabase(&tr, logUid) );
wait( lockDatabase(&tr, logUid) );
tr.set(backupAgent->tagNames.pack(tagName), logUidValue);
Void _ = wait(tr.commit());
wait(tr.commit());
commitVersion = tr.getCommittedVersion();
break;
} catch( Error &e ) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
@ -1889,18 +1889,18 @@ public:
break;
state Future<Void> versionWatch = tr2.watch(BinaryWriter::toValue(destlogUid, Unversioned()).withPrefix(applyMutationsBeginRange.begin));
Void _ = wait(tr2.commit());
Void _ = wait(versionWatch);
wait(tr2.commit());
wait(versionWatch);
tr2.reset();
} catch( Error &e ) {
Void _ = wait(tr2.onError(e));
wait(tr2.onError(e));
}
}
TraceEvent("DBA_SwitchoverReady");
try {
Void _ = wait( backupAgent->discontinueBackup(dest, tagName) );
wait( backupAgent->discontinueBackup(dest, tagName) );
} catch( Error &e ) {
if( e.code() != error_code_backup_duplicate && e.code() != error_code_backup_unneeded )
throw;
@ -1920,19 +1920,19 @@ public:
if (destVersion <= commitVersion) {
TEST(true); // Forcing dest backup cluster to higher version
tr3.set(minRequiredCommitVersionKey, BinaryWriter::toValue(commitVersion+1, Unversioned()));
Void _ = wait(tr3.commit());
wait(tr3.commit());
} else {
break;
}
} catch( Error &e ) {
Void _ = wait(tr3.onError(e));
wait(tr3.onError(e));
}
}
TraceEvent("DBA_SwitchoverVersionUpgraded");
try {
Void _ = wait( drAgent.submitBackup(backupAgent->taskBucket->src, tagName, backupRanges, false, addPrefix, removePrefix, true, true) );
wait( drAgent.submitBackup(backupAgent->taskBucket->src, tagName, backupRanges, false, addPrefix, removePrefix, true, true) );
} catch( Error &e ) {
if( e.code() != error_code_backup_duplicate )
throw;
@ -1944,7 +1944,7 @@ public:
TraceEvent("DBA_SwitchoverStarted");
Void _ = wait( backupAgent->unlockBackup(dest, tagName) );
wait( backupAgent->unlockBackup(dest, tagName) );
TraceEvent("DBA_SwitchoverUnlocked");
@ -1992,7 +1992,7 @@ public:
state Future<int> statusFuture= backupAgent->getStateValue(tr, logUid);
state Future<UID> destUidFuture = backupAgent->getDestUid(tr, logUid);
Void _ = wait(success(statusFuture) && success(destUidFuture));
wait(success(statusFuture) && success(destUidFuture));
UID destUid = destUidFuture.get();
if (destUid.isValid()) {
@ -2016,12 +2016,12 @@ public:
tr->set(StringRef(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyStateStatus)), StringRef(DatabaseBackupAgent::getStateText(BackupAgentBase::STATE_PARTIALLY_ABORTED)));
Void _ = wait(tr->commit());
wait(tr->commit());
TraceEvent("DBA_Abort").detail("CommitVersion", tr->getCommittedVersion());
break;
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -2059,10 +2059,10 @@ public:
tr->addWriteConflictRange(singleKeyRange(minRequiredCommitVersionKey));
}
}
Void _ = wait(tr->commit());
wait(tr->commit());
break;
} catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -2106,18 +2106,18 @@ public:
srcTr->set( backupAgent->sourceStates.pack(DatabaseBackupAgent::keyStateStatus), StringRef(DatabaseBackupAgent::getStateText(BackupAgentBase::STATE_PARTIALLY_ABORTED) ));
srcTr->set( backupAgent->sourceStates.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId), backupUid );
Void _ = wait(srcTr->commit());
wait(srcTr->commit());
endVersion = srcTr->getCommittedVersion() + 1;
break;
}
catch (Error &e) {
Void _ = wait(srcTr->onError(e));
wait(srcTr->onError(e));
}
}
if (clearSrcDb && !abortOldBackup) {
Void _ = wait(eraseLogData(backupAgent->taskBucket->src, logUidValue, destUidValue));
wait(eraseLogData(backupAgent->taskBucket->src, logUidValue, destUidValue));
}
tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
@ -2133,12 +2133,12 @@ public:
tr->set(StringRef(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyStateStatus)), StringRef(DatabaseBackupAgent::getStateText(BackupAgentBase::STATE_ABORTED)));
Void _ = wait(tr->commit());
wait(tr->commit());
return Void();
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -2257,7 +2257,7 @@ public:
break;
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}


@ -177,7 +177,8 @@ bool DatabaseConfiguration::isValid() const {
usableRegions <= 2 &&
regions.size() <= 2 &&
( usableRegions == 1 || regions.size() == 2 ) &&
( regions.size() == 0 || regions[0].priority >= 0 ) ) ) {
( regions.size() == 0 || regions[0].priority >= 0 ) &&
( regions.size() == 0 || tLogPolicy->info() != "dcid^2 x zoneid^2 x 1") ) ) { // We cannot specify regions with three_datacenter replication
return false;
}
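// Context for the new clause: "dcid^2 x zoneid^2 x 1" is the info() string of
// the tLog replication policy that the three_datacenter preset installs
// (across two dcids, then two zoneids within each), so the comparison rejects
// any configuration combining regions with three_datacenter replication.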


@ -51,15 +51,14 @@ public:
class DatabaseContext : public ReferenceCounted<DatabaseContext>, NonCopyable {
public:
static Future<Database> createDatabase( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<Cluster> cluster, Standalone<StringRef> dbName, LocalityData const& clientLocality );
//static Future< Void > configureDatabase( ZookeeperInterface const& zk, int configScope, int configMode, Standalone<StringRef> dbName = Standalone<StringRef>() );
static Future<Database> createDatabase( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<Cluster> cluster, LocalityData const& clientLocality );
// For internal (fdbserver) use only: create a database context for a DB with already known client info
static Database create( Reference<AsyncVar<ClientDBInfo>> info, Future<Void> dependency, LocalityData clientLocality, bool enableLocalityLoadBalance, int taskID = TaskDefaultEndpoint, bool lockAware = false );
~DatabaseContext();
Database clone() const { return Database(new DatabaseContext( clientInfo, cluster, clientInfoMonitor, dbName, dbId, taskID, clientLocality, enableLocalityLoadBalance, lockAware )); }
Database clone() const { return Database(new DatabaseContext( clientInfo, cluster, clientInfoMonitor, dbId, taskID, clientLocality, enableLocalityLoadBalance, lockAware )); }
pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
bool getCachedLocations( const KeyRangeRef&, vector<std::pair<KeyRange,Reference<LocationInfo>>>&, int limit, bool reverse );
@ -90,7 +89,7 @@ public:
//private: friend class ClientInfoMonitorActor;
explicit DatabaseContext( Reference<AsyncVar<ClientDBInfo>> clientInfo,
Reference<Cluster> cluster, Future<Void> clientInfoMonitor,
Standalone<StringRef> dbName, Standalone<StringRef> dbId, int taskID, LocalityData clientLocality, bool enableLocalityLoadBalance, bool lockAware );
Standalone<StringRef> dbId, int taskID, LocalityData clientLocality, bool enableLocalityLoadBalance, bool lockAware );
// These are reference counted
Reference<Cluster> cluster;
@ -127,8 +126,6 @@ public:
std::map< std::vector<UID>, LocationInfo* > ssid_locationInfo;
// for logging/debugging (relic of multi-db support)
Standalone<StringRef> dbName;
Standalone<StringRef> dbId;
int64_t transactionReadVersions;
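// With multi-database support gone, dbName disappears from construction,
// cloning, and this member list. A call-site sketch of the surviving
// overload (names illustrative, not from this diff):
//
//   Future<Database> db = DatabaseContext::createDatabase(
//       clusterInterface, cluster, LocalityData() );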


@ -27,12 +27,14 @@
#elif !defined(FDBCLIENT_EVENTTYPES_ACTOR_H)
#define FDBCLIENT_EVENTTYPES_ACTOR_H
#include "flow/actorcompiler.h"
#include "flow/flow.h"
#include "flow/TDMetric.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
DESCR struct GetValueComplete {
int64_t latency; //ns
};
#include "flow/unactorcompiler.h"
#endif
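// The resulting canonical layout for .actor.h headers (sketch):
//
//   #include "flow/flow.h"            // normal includes first
//   #include "flow/actorcompiler.h"   // This must be the last #include.
//   DESCR struct SomeEvent { ... };   // ACTOR/DESCR declarations
//   #include "flow/unactorcompiler.h"
//   #endif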


@ -47,6 +47,7 @@ private:
public:
typename std::map<typename T::Option, FDBOptionInfo>::iterator begin() { return optionInfo.begin(); }
typename std::map<typename T::Option, FDBOptionInfo>::iterator end() { return optionInfo.end(); }
typename std::map<typename T::Option, FDBOptionInfo>::iterator find(const typename T::Option& key) { return optionInfo.find(key); }
FDBOptionInfo& operator[] (const typename T::Option& key) { return optionInfo[key]; }


@ -94,14 +94,14 @@ ACTOR Future<Void> failureMonitorClientLoop(
waitfor = reply.clientRequestIntervalMS * .001;
nextRequest = delayJittered( waitfor, TaskFailureMonitor );
}
when( Void _ = wait( requestTimeout ) ) {
when( wait( requestTimeout ) ) {
g_network->setCurrentTask(TaskDefaultDelay);
requestTimeout = Never();
TraceEvent(SevWarn, "FailureMonitoringServerDown").detail("OldServerID",controller.id());
monitor->setStatus( controller.failureMonitoring.getEndpoint().address, FailureStatus(true) );
fmState->knownAddrs.erase( controller.failureMonitoring.getEndpoint().address );
}
when( Void _ = wait( nextRequest ) ) {
when( wait( nextRequest ) ) {
g_network->setCurrentTask(TaskDefaultDelay);
nextRequest = Never();
@ -137,6 +137,6 @@ ACTOR Future<Void> failureMonitorClient( Reference<AsyncVar<Optional<struct Clus
loop {
state Future<Void> client = ci->get().present() ? failureMonitorClientLoop(monitor, ci->get().get(), fmState, trackMyStatus) : Void();
Void _ = wait( ci->onChange() );
wait( ci->onChange() );
}
}
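// The bare-wait rewrite applies inside choose blocks as well; a minimal
// sketch (hypothetical actor, not part of this commit):
//
//   ACTOR Future<Void> firstOf(Future<Void> a, Future<Void> b) {
//       choose {
//           when( wait(a) ) {}
//           when( wait(b) ) {}
//       }
//       return Void();
//   }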

File diff suppressed because it is too large


@ -84,7 +84,7 @@ public:
class ICluster {
public:
virtual ~ICluster() {}
virtual ThreadFuture<Reference<IDatabase>> createDatabase(Standalone<StringRef> dbName) = 0;
virtual ThreadFuture<Reference<IDatabase>> createDatabase() = 0;
virtual void setOption(FDBClusterOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) = 0;
virtual void addref() = 0;

fdbclient/JsonBuilder.cpp (new executable file, +149 lines)

@ -0,0 +1,149 @@
#include "JsonBuilder.h"
#include <iostream>
JsonBuilderObject JsonBuilder::makeMessage(const char *name, const char *description) {
JsonBuilderObject out;
out["name"] = name;
out["description"] = description;
return out;
}
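// For example, makeMessage("unreachable_process", "The process is not reachable.")
// builds {"name":"unreachable_process","description":"The process is not reachable."}.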
// dst must have at least len + 3 bytes available (".e" becomes "0.0e0")
// Returns bytes written, or 0 on failure.
int JsonBuilder::coerceAsciiNumberToJSON(const char *s, int len, char *dst) {
if(len == 0) {
return 0;
}
const char *send = s + len;
char *wptr = dst;
bool dot = false;
// Allow one optional sign
if(*s == '-') {
*wptr++ = *s++;
// Output not yet valid so return failure
if(s == send) {
return 0;
}
}
// 'inf' becomes 1e99
if(*s == 'i') {
if(len >= 3 && (strncmp(s, "inf", 3) == 0)) {
strcpy(wptr, "1e99");
return 4 + wptr - dst;
}
// Anything else starting with 'i' is a failure
return 0;
}
// Skip leading zeroes
while(*s == '0') {
++s;
// If found end, number is valid and zero
if(s == send) {
*wptr++ = '0';
return wptr - dst;
}
}
// If a dot is found, write a zero before it
if(*s == '.') {
dot = true;
*wptr++ = '0';
*wptr++ = *s++;
// If found end, add a zero and return
if(s == send) {
*wptr++ = '0';
return wptr - dst;
}
// If there is no digit after the dot, write a zero
if(!isdigit(*s)) {
*wptr++ = '0';
}
}
// Write all digits found
while(isdigit(*s)) {
*wptr++ = *s++;
// If found end, number is valid so return
if(s == send) {
return wptr - dst;
}
}
// If there is a dot, return unless it's the first
if(*s == '.') {
if(dot) {
return wptr - dst;
}
*wptr++ = *s++;
// If found end, add a zero and return
if(s == send) {
*wptr++ = '0';
return wptr - dst;
}
// If there are more digits write them, else write a 0
if(isdigit(*s)) {
do {
*wptr++ = *s++;
// If found end, number is valid so return
if(s == send) {
return wptr - dst;
}
} while(isdigit(*s));
}
else {
*wptr++ = '0';
}
}
// Now we can have an e or E, else stop
if(*s == 'e' || *s == 'E') {
*wptr++ = *s++;
// If found end, add a zero and return
if(s == send) {
*wptr++ = '0';
return wptr - dst;
}
// Allow one optional sign
if(*s == '-' || *s == '+') {
*wptr++ = *s++;
}
// If found end, add a zero and return
if(s == send) {
*wptr++ = '0';
return wptr - dst;
}
// If there are more digits write them, else write a 0
if(isdigit(*s)) {
do {
*wptr++ = *s++;
// If found end, number is valid so return
if(s == send) {
return wptr - dst;
}
} while(isdigit(*s));
}
else {
*wptr++ = '0';
}
}
return wptr - dst;
}
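// Illustrative inputs and outputs, following the code paths above:
//   "inf" -> "1e99"     "-inf" -> "-1e99"
//   ".5"  -> "0.5"      "5."   -> "5.0"
//   "1e"  -> "1e0"      "1e+"  -> "1e+0"
//   "007" -> "7"        "abc"  -> 0 bytes written (failure)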

fdbclient/JsonBuilder.h (new executable file, +347 lines)

@ -0,0 +1,347 @@
#pragma once
#include <string>
#include <vector>
#include <cmath>
#include "flow/flow.h"
#include "flow/Trace.h"
#include "fdbrpc/JSONDoc.h"
class JsonBuilder;
class JsonBuilderObject;
class JsonBuilderArray;
typedef JsonBuilder JsonString;
template <typename T> class JsonBuilderObjectSetter;
// Class for building JSON string values.
// Default value is null, as in the JSON type
class JsonBuilder {
protected:
enum EType { NULLVALUE, OBJECT, ARRAY };
typedef VectorRef<char> VString;
public:
// Default value is null, which will be considered "empty"
JsonBuilder() : type(NULLVALUE), elements(0), bytes(0) {
jsonText.resize(arena, 1);
}
int getFinalLength() const {
return bytes + strlen(getEnd());
}
// TODO: Remove the need for this by changing usages to steal this builder's content
std::string getJson() const {
std::string result;
result.reserve(bytes + 1);
for(auto& it : jsonText) {
result.append(it.begin(), it.end());
}
result.append(getEnd());
return result;
}
int size() const {
return elements;
}
bool empty() const {
return elements == 0;
}
static JsonBuilderObject makeMessage(const char *name, const char *description);
static int coerceAsciiNumberToJSON(const char *s, int len, char *dst);
protected:
EType type;
Arena arena;
mutable VectorRef<VString> jsonText;
int elements;
int bytes;
// 'raw' write methods
inline void write(const char *s, int len) {
bytes += len;
jsonText.back().append(arena, s, len);
}
inline void write(const char* s) {
write(s, strlen(s));
}
inline void write(const StringRef &s) {
write((char *)s.begin(), s.size());
}
inline void write(char s) {
++bytes;
jsonText.back().push_back(arena, s);
}
// writeValue() methods write JSON form of the value
void writeValue(const json_spirit::mValue &val) {
switch(val.type()) {
case json_spirit::int_type:
return writeValue(val.get_int64());
case json_spirit::bool_type:
return writeValue(val.get_bool());
case json_spirit::real_type:
return writeValue(val.get_real());
case json_spirit::str_type:
return writeValue(val.get_str());
default:
// Catch-all for objects/arrays
return write(json_spirit::write_string(val));
};
}
void writeValue(const bool& val) {
write(val ? "true" : "false");
}
template<typename T> inline void writeFormat(const char *fmt, const T &val) {
VString &dst = jsonText.back();
const int limit = 30;
dst.reserve(arena, dst.size() + limit);
int len = snprintf(dst.end(), limit, fmt, val);
if(len > 0 && len < limit) {
dst.extendUnsafeNoReallocNoInit(len);
}
else {
write(format(fmt, val));
}
}
void writeValue(const int64_t& val) {
writeFormat("%lld", val);
}
void writeValue(const uint64_t& val) {
writeFormat("%llu", val);
}
void writeValue(const int& val) {
writeFormat("%d", val);
}
void writeValue(const double& val) {
if(std::isfinite(val)) {
writeFormat("%g", val);
}
else if(std::isnan(val)) {
write("-999");
}
else {
write("1e99");
}
}
bool shouldEscape(char c) {
switch( c ) {
case '"':
case '\\':
case '\b':
case '\f':
case '\n':
case '\r':
case '\t':
return true;
default:
return false;
}
}
void writeValue(const char *val, int len) {
write('"');
int beginCopy = 0;
VString &dst = jsonText.back();
for (int i = 0; i < len; i++) {
if (shouldEscape(val[i])) {
dst.append(arena, val + beginCopy, i - beginCopy);
beginCopy = i + 1;
write('\\');
write(val[i]);
}
}
if(beginCopy < len) {
dst.append(arena, val + beginCopy, len - beginCopy);
}
write('"');
}
inline void writeValue(const std::string& val) {
writeValue(val.data(), val.size());
}
inline void writeValue(const char* val) {
writeValue(val, strlen(val));
}
inline void writeValue(const StringRef &s) {
writeValue((const char *)s.begin(), s.size());
}
// Write the finalized (closed) form of val
void writeValue(const JsonBuilder &val) {
bytes += val.bytes;
jsonText.append(arena, val.jsonText.begin(), val.jsonText.size());
val.jsonText.push_back(arena, VString());
arena.dependsOn(val.arena);
write(val.getEnd());
}
void writeCoercedAsciiNumber(const char *s, int len) {
VString &val = jsonText.back();
val.reserve(arena, val.size() + len + 3);
int written = coerceAsciiNumberToJSON(s, len, val.end());
if(written > 0) {
val.extendUnsafeNoReallocNoInit(written);
}
else {
write("-999");
}
}
inline void writeCoercedAsciiNumber(const StringRef &s) {
writeCoercedAsciiNumber((const char *)s.begin(), s.size());
}
inline void writeCoercedAsciiNumber(const std::string &s) {
writeCoercedAsciiNumber(s.data(), s.size());
}
// Helper function to add contents of another JsonBuilder to this one.
// This is only used by the subclasses to combine like-typed (at compile time) objects,
// so it can be assumed that the other object has been initialized with an opening character.
void _addContents(const JsonBuilder &other) {
if(other.empty()) {
return;
}
if(elements > 0) {
write(',');
}
// Add everything but the first byte of the first string of other
bytes += other.bytes - 1;
const VString &front = other.jsonText.front();
jsonText.push_back(arena, front.slice(1, front.size()));
jsonText.append(arena, other.jsonText.begin() + 1, other.jsonText.size() - 1);
// Both JsonBuilders would now want to write to the same additional VString capacity memory
// if they were modified, so force the other (likely to not be modified again) to start a new one.
other.jsonText.push_back(arena, VString());
arena.dependsOn(other.arena);
elements += other.elements;
}
// Get the text necessary to finish the JSON string
const char * getEnd() const {
switch(type) {
case NULLVALUE:
return "null";
case OBJECT:
return "}";
case ARRAY:
return "]";
default:
return "";
};
}
};
class JsonBuilderArray : public JsonBuilder {
public:
JsonBuilderArray() {
type = ARRAY;
write('[');
}
template<typename VT> inline JsonBuilderArray & push_back(const VT &val) {
if(elements++ > 0) {
write(',');
}
writeValue(val);
return *this;
}
JsonBuilderArray & addContents(const json_spirit::mArray &arr) {
for(auto &v : arr) {
push_back(v);
}
return *this;
}
JsonBuilderArray & addContents(const JsonBuilderArray &arr) {
_addContents(arr);
return *this;
}
};
class JsonBuilderObject : public JsonBuilder {
public:
JsonBuilderObject() {
type = OBJECT;
write('{');
}
template<typename KT, typename VT> inline JsonBuilderObject & setKey(const KT &name, const VT &val) {
if(elements++ > 0) {
write(',');
}
write('"');
write(name);
write("\":");
writeValue(val);
return *this;
}
template<typename KT, typename VT> inline JsonBuilderObject & setKeyRawNumber(const KT &name, const VT &val) {
if(elements++ > 0) {
write(',');
}
write('"');
write(name);
write("\":");
writeCoercedAsciiNumber(val);
return *this;
}
template<typename T> inline JsonBuilderObjectSetter<T> operator[](T &&name);
JsonBuilderObject & addContents(const json_spirit::mObject &obj) {
for(auto &kv : obj) {
setKey(kv.first, kv.second);
}
return *this;
}
JsonBuilderObject & addContents(const JsonBuilderObject &obj) {
_addContents(obj);
return *this;
}
};
// The template parameter is the key name type; the name is accepted as an r-value where possible to avoid copying when it's a string
template<typename KT>
class JsonBuilderObjectSetter {
public:
JsonBuilderObjectSetter(JsonBuilderObject &dest, KT &&name) : dest(dest), name(std::forward<KT>(name)) {}
// Value is accepted as an rvalue if possible
template <class VT> inline void operator=(const VT &value) {
dest.setKey(name, value);
}
protected:
JsonBuilderObject& dest;
KT name;
};
template<typename T> inline JsonBuilderObjectSetter<T> JsonBuilderObject::operator[](T &&name) {
return JsonBuilderObjectSetter<T>(*this, std::forward<T>(name));
}


@ -158,7 +158,7 @@ ACTOR Future<Void> krmSetRangeCoalescing( Transaction *tr, Key mapPrefix, KeyRan
state vector<Future<Standalone<RangeResultRef>>> keys;
keys.push_back(tr->getRange(lastLessThan(withPrefix.begin), firstGreaterOrEqual(withPrefix.begin), 1, true));
keys.push_back(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end) + 1, 2, true));
Void _ = wait(waitForAll(keys));
wait(waitForAll(keys));
//Determine how far to extend this range at the beginning
auto beginRange = keys[0].get();


@ -39,6 +39,8 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( FAILURE_MIN_DELAY, 4.0 ); if( randomize && BUGGIFY ) FAILURE_MIN_DELAY = 1.0;
init( FAILURE_TIMEOUT_DELAY, FAILURE_MIN_DELAY );
init( CLIENT_FAILURE_TIMEOUT_DELAY, FAILURE_MIN_DELAY );
init( FAILURE_EMERGENCY_DELAY, 60.0 );
init( FAILURE_MAX_GENERATIONS, 4 );
// wrong_shard_server sometimes comes from the only nonfailed server, so we need to avoid a fast spin
@ -77,7 +79,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( WATCH_POLLING_TIME, 1.0 ); if( randomize && BUGGIFY ) WATCH_POLLING_TIME = 5.0;
init( NO_RECENT_UPDATES_DURATION, 20.0 ); if( randomize && BUGGIFY ) NO_RECENT_UPDATES_DURATION = 0.1;
init( FAST_WATCH_TIMEOUT, 20.0 ); if( randomize && BUGGIFY ) FAST_WATCH_TIMEOUT = 1.0;
init( WATCH_TIMEOUT, 900.0 ); if( randomize ) WATCH_TIMEOUT = 20.0;
init( WATCH_TIMEOUT, 900.0 ); if( randomize && BUGGIFY ) WATCH_TIMEOUT = 20.0;
// Core
init( CORE_VERSIONSPERSECOND, 1e6 );
@ -168,7 +170,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( BLOBSTORE_MAX_SEND_BYTES_PER_SECOND, 1e9 );
init( BLOBSTORE_MAX_RECV_BYTES_PER_SECOND, 1e9 );
init( BLOBSTORE_LIST_REQUESTS_PER_SECOND, 25 );
init( BLOBSTORE_LIST_REQUESTS_PER_SECOND, 200 );
init( BLOBSTORE_WRITE_REQUESTS_PER_SECOND, 50 );
init( BLOBSTORE_READ_REQUESTS_PER_SECOND, 100 );
init( BLOBSTORE_DELETE_REQUESTS_PER_SECOND, 200 );


@ -38,6 +38,8 @@ public:
double FAILURE_MIN_DELAY;
double FAILURE_TIMEOUT_DELAY;
double CLIENT_FAILURE_TIMEOUT_DELAY;
double FAILURE_EMERGENCY_DELAY;
double FAILURE_MAX_GENERATIONS;
// wrong_shard_server sometimes comes from the only nonfailed server, so we need to avoid a fast spin
double WRONG_SHARD_SERVER_DELAY; // SOMEDAY: This delay can limit performance of retrieving data when the cache is mostly wrong (e.g. dumping the database after a test)


@ -18,7 +18,6 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include "ManagementAPI.h"
#include "SystemData.h"
@ -30,6 +29,7 @@
#include "flow/UnitTest.h"
#include "fdbrpc/ReplicationPolicy.h"
#include "fdbrpc/Replication.h"
#include "flow/actorcompiler.h" // This must be the last #include.
static Future<vector<AddressExclusion>> getExcludedServers( Transaction* const& tr );
@ -252,6 +252,22 @@ bool isCompleteConfiguration( std::map<std::string, std::string> const& options
options.count( p+"storage_engine" ) == 1;
}
ACTOR Future<DatabaseConfiguration> getDatabaseConfiguration( Database cx ) {
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
Standalone<RangeResultRef> res = wait( tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY) );
ASSERT( res.size() < CLIENT_KNOBS->TOO_MANY );
DatabaseConfiguration config;
config.fromKeyValues((VectorRef<KeyValueRef>) res);
return config;
} catch( Error &e ) {
wait( tr.onError(e) );
}
}
}
ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std::string, std::string> m ) {
state StringRef initIdKey = LiteralStringRef( "\xff/init_id" );
state Transaction tr(cx);
@ -266,6 +282,19 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
m[initIdKey.toString()] = g_random->randomUniqueID().toString();
if (!isCompleteConfiguration(m))
return ConfigurationResult::INCOMPLETE_CONFIGURATION;
} else {
state Future<DatabaseConfiguration> fConfig = getDatabaseConfiguration(cx);
wait( success(fConfig) || delay(1.0) );
if(fConfig.isReady()) {
DatabaseConfiguration config = fConfig.get();
for(auto kv : m) {
config.set(kv.first, kv.second);
}
if(!config.isValid()) {
return ConfigurationResult::INVALID_CONFIGURATION;
}
}
}
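// Net effect (sketch): the proposed options are merged into the current
// configuration and rejected up front with INVALID_CONFIGURATION if the
// result is invalid, e.g. adding regions to a three_datacenter database.
// The "success(fConfig) || delay(1.0)" bounds this pre-check at one second
// so an unreachable database cannot stall the configuration change.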
loop {
@ -284,7 +313,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
for(auto i=m.begin(); i!=m.end(); ++i)
tr.set( StringRef(i->first), StringRef(i->second) );
Void _ = wait( tr.commit() );
wait( tr.commit() );
break;
} catch (Error& e) {
state Error e1(e);
@ -302,11 +331,11 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
else
return ConfigurationResult::DATABASE_CREATED;
} catch (Error& e2) {
Void _ = wait( tr.onError(e2) );
wait( tr.onError(e2) );
}
}
}
Void _ = wait( tr.onError(e1) );
wait( tr.onError(e1) );
}
}
return ConfigurationResult::SUCCESS;
@ -599,10 +628,10 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
tr.set(kv.first, kv.second);
}
Void _ = wait( tr.commit() );
wait( tr.commit() );
return ConfigurationResult::SUCCESS;
} catch( Error &e ) {
Void _ = wait( tr.onError(e));
wait( tr.onError(e));
}
}
}
@ -632,7 +661,7 @@ ACTOR Future<vector<ProcessData>> getWorkers( Transaction* tr ) {
state Future<Standalone<RangeResultRef>> processClasses = tr->getRange( processClassKeys, CLIENT_KNOBS->TOO_MANY );
state Future<Standalone<RangeResultRef>> processData = tr->getRange( workerListKeys, CLIENT_KNOBS->TOO_MANY );
Void _ = wait( success(processClasses) && success(processData) );
wait( success(processClasses) && success(processData) );
ASSERT( !processClasses.get().more && processClasses.get().size() < CLIENT_KNOBS->TOO_MANY );
ASSERT( !processData.get().more && processData.get().size() < CLIENT_KNOBS->TOO_MANY );
@ -667,7 +696,7 @@ ACTOR Future<vector<ProcessData>> getWorkers( Database cx ) {
vector<ProcessData> workers = wait( getWorkers(&tr) );
return workers;
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -683,7 +712,7 @@ ACTOR Future<std::vector<NetworkAddress>> getCoordinators( Database cx ) {
return ClusterConnectionString( currentKey.get().toString() ).coordinators();
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -739,19 +768,19 @@ ACTOR Future<CoordinatorsResult::Type> changeQuorum( Database cx, Reference<IQuo
leaderServers.push_back( retryBrokenPromise( coord.clientLeaderServers[i].getLeader, GetLeaderRequest( coord.clusterKey, UID() ), TaskCoordinationReply ) );
choose {
when( Void _ = wait( waitForAll( leaderServers ) ) ) {}
when( Void _ = wait( delay(5.0) ) ) {
when( wait( waitForAll( leaderServers ) ) ) {}
when( wait( delay(5.0) ) ) {
return CoordinatorsResult::COORDINATOR_UNREACHABLE;
}
}
tr.set( coordinatorsKey, conn.toString() );
Void _ = wait( tr.commit() );
wait( tr.commit() );
ASSERT( false ); //commit should fail, but the value has changed
} catch (Error& e) {
TraceEvent("RetryQuorumChange").error(e).detail("Retries", retries);
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
++retries;
}
}
@ -799,7 +828,7 @@ struct AutoQuorumChange : IQuorumChange {
ACTOR static Future<int> getRedundancy( AutoQuorumChange* self, Transaction* tr ) {
state Future<Optional<Value>> fStorageReplicas = tr->get( LiteralStringRef("storage_replicas").withPrefix( configKeysPrefix ) );
state Future<Optional<Value>> fLogReplicas = tr->get( LiteralStringRef("log_replicas").withPrefix( configKeysPrefix ) );
Void _ = wait( success( fStorageReplicas ) && success( fLogReplicas ) );
wait( success( fStorageReplicas ) && success( fLogReplicas ) );
int redundancy = std::min(
atoi( fStorageReplicas.get().get().toString().c_str() ),
atoi( fLogReplicas.get().get().toString().c_str() ) );
@ -980,10 +1009,10 @@ ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers
TraceEvent("ExcludeServersCommit").detail("Servers", describe(servers));
Void _ = wait( tr.commit() );
wait( tr.commit() );
return Void();
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1017,11 +1046,11 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
TraceEvent("IncludeServersCommit").detail("Servers", describe(servers));
Void _ = wait( tr.commit() );
wait( tr.commit() );
return Void();
} catch (Error& e) {
TraceEvent("IncludeServersError").error(e, true);
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1051,10 +1080,10 @@ ACTOR Future<Void> setClass( Database cx, AddressExclusion server, ProcessClass
if(foundChange)
tr.set(processClassChangeKey, g_random->randomUniqueID().toString());
Void _ = wait( tr.commit() );
wait( tr.commit() );
return Void();
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1082,7 +1111,7 @@ ACTOR Future<vector<AddressExclusion>> getExcludedServers( Database cx ) {
vector<AddressExclusion> exclusions = wait( getExcludedServers(&tr) );
return exclusions;
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1111,11 +1140,11 @@ ACTOR Future<int> setDDMode( Database cx, int mode ) {
tr.set( dataDistributionModeKey, wr.toStringRef() );
Void _ = wait( tr.commit() );
wait( tr.commit() );
return oldMode;
} catch (Error& e) {
TraceEvent("SetDDModeRetrying").error(e);
Void _ = wait (tr.onError(e));
wait (tr.onError(e));
}
}
}
@ -1167,30 +1196,13 @@ ACTOR Future<Void> waitForExcludedServers( Database cx, vector<AddressExclusion>
if (ok) return Void();
Void _ = wait( delayJittered( 1.0 ) ); // SOMEDAY: watches!
wait( delayJittered( 1.0 ) ); // SOMEDAY: watches!
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
ACTOR Future<DatabaseConfiguration> getDatabaseConfiguration( Database cx ) {
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
Standalone<RangeResultRef> res = wait( tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY) );
ASSERT( res.size() < CLIENT_KNOBS->TOO_MANY );
DatabaseConfiguration config;
config.fromKeyValues((VectorRef<KeyValueRef>) res);
return config;
} catch( Error &e ) {
Void _ = wait( tr.onError(e) );
}
}
}
ACTOR Future<Void> waitForFullReplication( Database cx ) {
state ReadYourWritesTransaction tr(cx);
loop {
@ -1208,7 +1220,7 @@ ACTOR Future<Void> waitForFullReplication( Database cx ) {
for(auto& region : config.regions) {
replicasFutures.push_back(tr.get(datacenterReplicasKeyFor(region.dcId)));
}
Void _ = wait( waitForAll(replicasFutures) );
wait( waitForAll(replicasFutures) );
state std::vector<Future<Void>> watchFutures;
for(int i = 0; i < config.regions.size(); i++) {
@ -1221,11 +1233,11 @@ ACTOR Future<Void> waitForFullReplication( Database cx ) {
return Void();
}
Void _ = wait( tr.commit() );
Void _ = wait( waitForAny(watchFutures) );
wait( tr.commit() );
wait( waitForAny(watchFutures) );
tr.reset();
} catch (Error& e) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1237,10 +1249,10 @@ ACTOR Future<Void> timeKeeperSetDisable(Database cx) {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.set(timeKeeperDisableKey, StringRef());
Void _ = wait(tr.commit());
wait(tr.commit());
return Void();
} catch (Error &e) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
}
@ -1287,13 +1299,13 @@ ACTOR Future<Void> lockDatabase( Database cx, UID id ) {
state Transaction tr(cx);
loop {
try {
Void _ = wait( lockDatabase(&tr, id) );
Void _ = wait( tr.commit() );
wait( lockDatabase(&tr, id) );
wait( tr.commit() );
return Void();
} catch( Error &e ) {
if(e.code() == error_code_database_locked)
throw e;
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1336,13 +1348,13 @@ ACTOR Future<Void> unlockDatabase( Database cx, UID id ) {
state Transaction tr(cx);
loop {
try {
Void _ = wait( unlockDatabase(&tr, id) );
Void _ = wait( tr.commit() );
wait( unlockDatabase(&tr, id) );
wait( tr.commit() );
return Void();
} catch( Error &e ) {
if(e.code() == error_code_database_locked)
throw e;
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1379,16 +1391,151 @@ ACTOR Future<Void> forceRecovery (Reference<ClusterConnectionFile> clusterFile)
loop{
choose {
when( Void _ = wait( clusterInterface->get().present() ? brokenPromiseToNever( clusterInterface->get().get().forceRecovery.getReply( ForceRecoveryRequest() ) ) : Never() ) ) {
when( wait( clusterInterface->get().present() ? brokenPromiseToNever( clusterInterface->get().get().forceRecovery.getReply( ForceRecoveryRequest() ) ) : Never() ) ) {
return Void();
}
when( Void _ = wait(clusterInterface->onChange()) ) {}
when( wait(clusterInterface->onChange()) ) {}
}
}
}
json_spirit::Value_type normJSONType(json_spirit::Value_type type) {
if (type == json_spirit::int_type)
return json_spirit::real_type;
return type;
}
void schemaCoverage( std::string const& spath, bool covered ) {
static std::map<bool, std::set<std::string>> coveredSchemaPaths;
if( coveredSchemaPaths[covered].insert(spath).second ) {
TraceEvent ev(SevInfo, "CodeCoverage");
ev.detail("File", "documentation/StatusSchema.json/" + spath).detail("Line", 0);
if (!covered)
ev.detail("Covered", 0);
}
}
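A hedged sketch of how schemaCoverage is used (the paths are illustrative): the first call per (covered, path) pair emits a CodeCoverage trace event, and repeated calls are no-ops.

schemaCoverage("$.cluster.machines.$map");                          // covered, default true
schemaCoverage("$.cluster.recovery_state.name.$enum.other", false); // traced with Covered=0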
bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schema_path ) {
// Returns true if everything in `result` is permitted by `schema`
// Really this should recurse on "values" rather than "objects"?
bool ok = true;
try {
for(auto& rkv : result) {
auto& key = rkv.first;
auto& rv = rkv.second;
std::string kpath = path + "." + key;
std::string spath = schema_path + "." + key;
if(checkCoverage) schemaCoverage(spath);
if (!schema.count(key)) {
errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
ok = false;
continue;
}
auto& sv = schema.at(key);
if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
auto& enum_values = sv.get_obj().at("$enum").get_array();
bool any_match = false;
for(auto& enum_item : enum_values)
if (enum_item == rv) {
any_match = true;
if(checkCoverage) schemaCoverage(spath + ".$enum." + enum_item.get_str());
break;
}
if (!any_match) {
errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
if(checkCoverage) schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
ok = false;
}
} else if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
if (rv.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
ok = false;
continue;
}
if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
continue;
}
auto& schema_obj = sv.get_obj().at("$map").get_obj();
auto& value_obj = rv.get_obj();
if(checkCoverage) schemaCoverage(spath + ".$map");
for(auto& value_pair : value_obj) {
auto vpath = kpath + "[" + value_pair.first + "]";
auto upath = spath + ".$map";
if (value_pair.second.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", value_pair.second.type());
ok = false;
continue;
}
if (!schemaMatch(schema_obj, value_pair.second.get_obj(), errorStr, sev, checkCoverage, vpath, upath))
ok = false;
}
} else {
// The schema entry isn't an operator, so it asserts a type and (depending on the type) a recursive schema definition
if (normJSONType(sv.type()) != normJSONType(rv.type())) {
errorStr += format("ERROR: Incorrect value type for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
ok = false;
continue;
}
if (rv.type() == json_spirit::array_type) {
auto& value_array = rv.get_array();
auto& schema_array = sv.get_array();
if (!schema_array.size()) {
// An empty schema array means that the value array is required to be empty
if (value_array.size()) {
errorStr += format("ERROR: Expected an empty array for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaSize", schema_array.size()).detail("ValueSize", value_array.size());
ok = false;
continue;
}
} else if (schema_array.size() == 1 && schema_array[0].type() == json_spirit::obj_type) {
// A one item schema array means that all items in the value must match the first item in the schema
auto& schema_obj = schema_array[0].get_obj();
int index = 0;
for(auto &value_item : value_array) {
if (value_item.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected all array elements to be objects for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath + format("[%d]",index)).detail("ValueType", value_item.type());
ok = false;
continue;
}
if (!schemaMatch(schema_obj, value_item.get_obj(), errorStr, sev, checkCoverage, kpath + format("[%d]", index), spath + "[0]"))
ok = false;
index++;
}
} else
ASSERT(false); // Schema doesn't make sense
} else if (rv.type() == json_spirit::obj_type) {
auto& schema_obj = sv.get_obj();
auto& value_obj = rv.get_obj();
if (!schemaMatch(schema_obj, value_obj, errorStr, sev, checkCoverage, kpath, spath))
ok = false;
}
}
}
return ok;
} catch (std::exception& e) {
TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schema_path);
throw unknown_error();
}
}
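To make the operators above concrete, a hedged usage sketch (the JSON literals are illustrative, and it assumes StatusObject is json_spirit's mObject, as the surrounding code suggests):

// Hypothetical check of a status fragment against a schema fragment.
json_spirit::mValue schemaVal, resultVal;
json_spirit::read_string(std::string(R"({"state":{"$enum":["healthy","degraded"]}})"), schemaVal);
json_spirit::read_string(std::string(R"({"state":"healthy"})"), resultVal);
std::string errorStr;
bool ok = schemaMatch(schemaVal.get_obj(), resultVal.get_obj(), errorStr);
// ok is true here; an unknown key or out-of-enum value would set ok to false
// and append an ERROR line to errorStr.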
TEST_CASE("ManagementAPI/AutoQuorumChange/checkLocality") {
Void _ = wait(Future<Void>(Void()));
wait(Future<Void>(Void()));
std::vector<ProcessData> workers;
std::vector<NetworkAddress> chosen;

View File

@ -46,6 +46,7 @@ public:
CONFLICTING_OPTIONS,
UNKNOWN_OPTION,
INCOMPLETE_CONFIGURATION,
INVALID_CONFIGURATION,
DATABASE_ALREADY_CREATED,
DATABASE_CREATED,
SUCCESS
@ -166,4 +167,8 @@ Future<Void> forceRecovery (Reference<ClusterConnectionFile> const& clusterFile)
// Gets the cluster connection string
Future<std::vector<NetworkAddress>> getCoordinators( Database const& cx );
void schemaCoverage( std::string const& spath, bool covered=true );
bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );
#endif

View File

@ -56,7 +56,7 @@ struct MasterProxyInterface {
getConsistentReadVersion.getEndpoint(TaskProxyGetConsistentReadVersion);
getRawCommittedVersion.getEndpoint(TaskProxyGetRawCommittedVersion);
commit.getEndpoint(TaskProxyCommitDispatcher);
getKeyServersLocations.getEndpoint(TaskProxyGetKeyServersLocations);
//getKeyServersLocations.getEndpoint(TaskProxyGetKeyServersLocations); //do not increase the priority of these requests, because clients can bring down the cluster with too many of these messages.
}
};

View File

@ -18,13 +18,13 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include <cmath>
#include "flow/UnitTest.h"
#include "flow/TDMetric.actor.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbclient/ReadYourWrites.h"
#include "fdbclient/KeyBackedTypes.h"
#include <cmath>
#include "flow/actorcompiler.h" // This must be the last #include.
struct MetricsRule {
MetricsRule(bool enabled = false, int minLevel = 0, StringRef const &name = StringRef()) : enabled(enabled), minLevel(minLevel), namePattern(name) {}
@ -158,12 +158,12 @@ ACTOR Future<Void> metricRuleUpdater(Database cx, MetricsConfig *config, TDMetri
config->rules = std::move(rules);
state Future<Void> rulesChanged = tr->watch(config->ruleChangeKey);
Void _ = wait(tr->commit());
Void _ = wait(rulesChanged || newMetric);
wait(tr->commit());
wait(rulesChanged || newMetric);
tr->reset();
} catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -235,7 +235,7 @@ ACTOR Future<Void> dumpMetrics(Database cx, MetricsConfig *config, TDMetricColle
state Error lastError;
while(cb != results.end()) {
try {
Void _ = wait(cb->second);
wait(cb->second);
cb = results.erase(cb);
} catch(Error &e) {
++cb;
@ -248,7 +248,7 @@ ACTOR Future<Void> dumpMetrics(Database cx, MetricsConfig *config, TDMetricColle
break;
// Otherwise, wait to retry
Void _ = wait(cbtr.onError(lastError));
wait(cbtr.onError(lastError));
for(auto &cb : results)
cb.second = batch.callbacks[cb.first](&mdb, &batch);
}
@ -283,13 +283,13 @@ ACTOR Future<Void> dumpMetrics(Database cx, MetricsConfig *config, TDMetricColle
tr.set(u.first, u.second);
}
Void _ = wait( tr.commit() );
wait( tr.commit() );
break;
} catch( Error &e ) {
Void _ = wait( tr.onError( e ) );
wait( tr.onError( e ) );
}
}
Void _ = wait( nextDump );
wait( nextDump );
}
}
@ -347,15 +347,15 @@ ACTOR Future<Void> updateMetricRegistration(Database cx, MetricsConfig *config,
tr.set(key, timestamp);
}
Void _ = wait(tr.commit());
wait(tr.commit());
break;
} catch(Error &e) {
Void _ = wait(tr.onError(e));
wait(tr.onError(e));
}
}
// Wait for a metric to require registration or a new metric to be added
Void _ = wait(registrationChange || newMetric);
wait(registrationChange || newMetric);
}
}
@ -373,7 +373,7 @@ ACTOR Future<Void> runMetrics( Future<Database> fcx, Key prefix ) {
if(metrics != nullptr)
if(metrics->init())
break;
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
state MetricsConfig config(prefix);
@ -384,7 +384,7 @@ ACTOR Future<Void> runMetrics( Future<Database> fcx, Key prefix ) {
Future<Void> dump = dumpMetrics(cx, &config, metrics);
Future<Void> reg = updateMetricRegistration(cx, &config, metrics);
Void _ = wait( conf || dump || reg);
wait( conf || dump || reg);
} catch( Error &e ) {
if( e.code() != error_code_actor_cancelled ) {
// Disable all metrics
@ -410,7 +410,7 @@ TEST_CASE("fdbserver/metrics/TraceEvents") {
state Reference<Cluster> metricsCluster = Cluster::createCluster( metricsConnFile, Cluster::API_VERSION_LATEST );
TDMetricCollection::getTDMetrics()->address = LiteralStringRef("0.0.0.0:0");
state Future<Void> metrics = runMetrics(metricsCluster->createDatabase(LiteralStringRef("DB")), KeyRef(metricsPrefix));
state Future<Void> metrics = runMetrics(metricsCluster->createDatabase(), KeyRef(metricsPrefix));
state int64_t x = 0;
state double w = 0.5;
@ -453,7 +453,7 @@ TEST_CASE("fdbserver/metrics/TraceEvents") {
.detail("K", sin(3.0 * x))
.detail("S", sstart + (double)chunk * sin(10.0 * i / chunk));
}
Void _ = wait(delay(w));
wait(delay(w));
double sstart = x;
for(int i = 0; i < chunk; ++i, ++x) {
@ -469,7 +469,7 @@ TEST_CASE("fdbserver/metrics/TraceEvents") {
.detail("K", sin(3.0 * x))
.detail("S", sstart + (double)chunk * sin(40.0 * i / chunk));
}
Void _ = wait(delay(w));
wait(delay(w));
double sstart = x;
for(int i = 0; i < chunk; ++i, ++x) {
@ -485,7 +485,7 @@ TEST_CASE("fdbserver/metrics/TraceEvents") {
.detail("K", sin(3.0 * x))
.detail("S", sstart + (double)chunk * sin(160.0 * i / chunk));
}
Void _ = wait(delay(w));
wait(delay(w));
if(x >= total)
return Void();

View File

@ -99,7 +99,7 @@ bool ClusterConnectionFile::fileContentsUpToDate(ClusterConnectionString &fileCo
return fileConnectionString.toString() == cs.toString();
}
catch (Error& e) {
TraceEvent(SevWarnAlways, "ClusterFileError").detail("Filename", filename).error(e);
TraceEvent(SevWarnAlways, "ClusterFileError").error(e).detail("Filename", filename);
return false; // Swallow the error and report that the file is out of date
}
}
@ -118,7 +118,7 @@ bool ClusterConnectionFile::writeFile() {
return true;
} catch( Error &e ) {
TraceEvent(SevWarnAlways, "UnableToChangeConnectionFile").detail("Filename", filename).detail("ConnStr", cs.toString()).error(e);
TraceEvent(SevWarnAlways, "UnableToChangeConnectionFile").error(e).detail("Filename", filename).detail("ConnStr", cs.toString());
}
}
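A recurring mechanical change in this commit: error() and suppressFor() now precede the detail() calls on a TraceEvent. A hedged sketch of the resulting builder order (the event and field names are illustrative):

// Hypothetical trace emission in the new order:
TraceEvent(SevWarnAlways, "ExampleConnectionFileError")
	.error(e)
	.suppressFor(1.0)
	.detail("Filename", filename);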
@ -309,17 +309,17 @@ ClientLeaderRegInterface::ClientLeaderRegInterface( INetwork* local ) {
ACTOR Future<Void> monitorNominee( Key key, ClientLeaderRegInterface coord, AsyncTrigger* nomineeChange, Optional<LeaderInfo> *info, int generation ) {
loop {
state Optional<LeaderInfo> li = wait( retryBrokenPromise( coord.getLeader, GetLeaderRequest( key, info->present() ? info->get().changeID : UID() ), TaskCoordinationReply ) );
Void _ = wait( Future<Void>(Void()) ); // Make sure we weren't cancelled
wait( Future<Void>(Void()) ); // Make sure we weren't cancelled
TraceEvent("GetLeaderReply").detail("Coordinator", coord.getLeader.getEndpoint().address).detail("Nominee", li.present() ? li.get().changeID : UID()).detail("Generation", generation);
TraceEvent("GetLeaderReply").suppressFor(1.0).detail("Coordinator", coord.getLeader.getEndpoint().address).detail("Nominee", li.present() ? li.get().changeID : UID()).detail("Generation", generation);
if (li != *info) {
*info = li;
nomineeChange->trigger();
if( li.present() && li.get().forward )
Void _ = wait( Future<Void>(Never()) );
Void _ = wait( Future<Void>(Void()) );
wait( Future<Void>(Never()) );
wait( Future<Void>(Void()) );
}
}
}
@ -420,7 +420,7 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterCon
outSerializedLeaderInfo->set( leader.get().first.serializedInfo );
}
Void _ = wait( nomineeChange.onTrigger() || allActors );
wait( nomineeChange.onTrigger() || allActors );
}
}

View File

@ -25,6 +25,8 @@
#include "flow/Platform.h"
#include "flow/UnitTest.h"
#include "flow/actorcompiler.h" // This must be the last #include.
void throwIfError(FdbCApi::fdb_error_t e) {
if(e) {
throw Error(e);
@ -221,8 +223,8 @@ void DLDatabase::setOption(FDBDatabaseOptions::Option option, Optional<StringRef
}
// DLCluster
ThreadFuture<Reference<IDatabase>> DLCluster::createDatabase(Standalone<StringRef> dbName) {
FdbCApi::FDBFuture *f = api->clusterCreateDatabase(cluster, (uint8_t*)dbName.toString().c_str(), dbName.size());
ThreadFuture<Reference<IDatabase>> DLCluster::createDatabase() {
FdbCApi::FDBFuture *f = api->clusterCreateDatabase(cluster, (uint8_t*)"DB", 2);
return toThreadFuture<Reference<IDatabase>>(api, f, [](FdbCApi::FDBFuture *f, FdbCApi *api) {
FdbCApi::FDBDatabase *db;
@ -564,8 +566,8 @@ void MultiVersionTransaction::reset() {
}
// MultiVersionDatabase
MultiVersionDatabase::MultiVersionDatabase(Reference<MultiVersionCluster> cluster, Standalone<StringRef> dbName, Reference<IDatabase> db, ThreadFuture<Void> changed)
: dbState(new DatabaseState(cluster, dbName, db, changed)) {}
MultiVersionDatabase::MultiVersionDatabase(Reference<MultiVersionCluster> cluster, Reference<IDatabase> db, ThreadFuture<Void> changed)
: dbState(new DatabaseState(cluster, db, changed)) {}
MultiVersionDatabase::~MultiVersionDatabase() {
dbState->cancelCallbacks();
@ -573,7 +575,7 @@ MultiVersionDatabase::~MultiVersionDatabase() {
Reference<IDatabase> MultiVersionDatabase::debugCreateFromExistingDatabase(Reference<IDatabase> db) {
auto cluster = Reference<ThreadSafeAsyncVar<Reference<ICluster>>>(new ThreadSafeAsyncVar<Reference<ICluster>>(Reference<ICluster>(NULL)));
return Reference<IDatabase>(new MultiVersionDatabase(Reference<MultiVersionCluster>::addRef(new MultiVersionCluster()), LiteralStringRef("DB"), db, ThreadFuture<Void>(Never())));
return Reference<IDatabase>(new MultiVersionDatabase(Reference<MultiVersionCluster>::addRef(new MultiVersionCluster()), db, ThreadFuture<Void>(Never())));
}
Reference<ITransaction> MultiVersionDatabase::createTransaction() {
@ -583,6 +585,16 @@ Reference<ITransaction> MultiVersionDatabase::createTransaction() {
void MultiVersionDatabase::setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value) {
MutexHolder holder(dbState->optionLock);
auto itr = FDBDatabaseOptions::optionInfo.find(option);
if(itr != FDBDatabaseOptions::optionInfo.end()) {
TraceEvent("SetDatabaseOption").detail("Option", itr->second.name);
}
else {
TraceEvent("UnknownDatabaseOption").detail("Option", option);
throw invalid_option();
}
if(dbState->db) {
dbState->db->setOption(option, value);
}
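With the validation above, an unrecognized database option now throws immediately instead of being silently queued. A hedged sketch (the out-of-range cast is illustrative):

// Hypothetical: an unknown option id is traced as UnknownDatabaseOption
// and rejected with invalid_option before reaching the underlying client.
db->setOption( (FDBDatabaseOptions::Option)9999, Optional<StringRef>() );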
@ -590,8 +602,8 @@ void MultiVersionDatabase::setOption(FDBDatabaseOptions::Option option, Optional
dbState->options.push_back(std::make_pair(option, value.cast_to<Standalone<StringRef>>()));
}
MultiVersionDatabase::DatabaseState::DatabaseState(Reference<MultiVersionCluster> cluster, Standalone<StringRef> dbName, Reference<IDatabase> db, ThreadFuture<Void> changed)
: cluster(cluster), dbName(dbName), db(db), dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(db)), cancelled(false), changed(changed)
MultiVersionDatabase::DatabaseState::DatabaseState(Reference<MultiVersionCluster> cluster, Reference<IDatabase> db, ThreadFuture<Void> changed)
: cluster(cluster), db(db), dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(db)), cancelled(false), changed(changed)
{
addref();
int userParam;
@ -615,7 +627,7 @@ void MultiVersionDatabase::DatabaseState::fire(const Void &unused, int& userPara
}
catch(Error &e) {
optionFailed = true;
TraceEvent(SevError, "DatabaseVersionChangeOptionError").detail("Option", option.first).detail("OptionValue", printable(option.second)).error(e);
TraceEvent(SevError, "DatabaseVersionChangeOptionError").error(e).detail("Option", option.first).detail("OptionValue", printable(option.second));
}
}
@ -665,7 +677,7 @@ void MultiVersionDatabase::DatabaseState::updateDatabase() {
if(currentCluster.value) {
addref();
dbFuture = currentCluster.value->createDatabase(dbName);
dbFuture = currentCluster.value->createDatabase();
dbFuture.callOrSetAsCallback(this, userParam, false);
}
}
@ -708,29 +720,38 @@ MultiVersionCluster::~MultiVersionCluster() {
clusterState->cancelConnections();
}
ThreadFuture<Reference<IDatabase>> MultiVersionCluster::createDatabase(Standalone<StringRef> dbName) {
ThreadFuture<Reference<IDatabase>> MultiVersionCluster::createDatabase() {
auto cluster = clusterState->clusterVar->get();
if(cluster.value) {
ThreadFuture<Reference<IDatabase>> dbFuture = abortableFuture(cluster.value->createDatabase(dbName), cluster.onChange);
ThreadFuture<Reference<IDatabase>> dbFuture = abortableFuture(cluster.value->createDatabase(), cluster.onChange);
return mapThreadFuture<Reference<IDatabase>, Reference<IDatabase>>(dbFuture, [this, cluster, dbName](ErrorOr<Reference<IDatabase>> db) {
return mapThreadFuture<Reference<IDatabase>, Reference<IDatabase>>(dbFuture, [this, cluster](ErrorOr<Reference<IDatabase>> db) {
if(db.isError() && db.getError().code() != error_code_cluster_version_changed) {
return db;
}
Reference<IDatabase> newDb = db.isError() ? Reference<IDatabase>(NULL) : db.get();
return ErrorOr<Reference<IDatabase>>(Reference<IDatabase>(new MultiVersionDatabase(Reference<MultiVersionCluster>::addRef(this), dbName, newDb, cluster.onChange)));
return ErrorOr<Reference<IDatabase>>(Reference<IDatabase>(new MultiVersionDatabase(Reference<MultiVersionCluster>::addRef(this), newDb, cluster.onChange)));
});
}
else {
return Reference<IDatabase>(new MultiVersionDatabase(Reference<MultiVersionCluster>::addRef(this), dbName, Reference<IDatabase>(), cluster.onChange));
return Reference<IDatabase>(new MultiVersionDatabase(Reference<MultiVersionCluster>::addRef(this), Reference<IDatabase>(), cluster.onChange));
}
}
void MultiVersionCluster::setOption(FDBClusterOptions::Option option, Optional<StringRef> value) {
MutexHolder holder(clusterState->optionLock);
auto itr = FDBClusterOptions::optionInfo.find(option);
if(itr != FDBClusterOptions::optionInfo.end()) {
TraceEvent("SetClusterOption").detail("Option", itr->second.name);
}
else {
TraceEvent("UnknownClusterOption").detail("Option", option);
throw invalid_option();
}
if(clusterState->cluster) {
clusterState->cluster->setOption(option, value);
}
@ -754,7 +775,7 @@ void MultiVersionCluster::Connector::connect() {
}
else {
candidateCluster = cluster.get();
return ErrorOr<ThreadFuture<Reference<IDatabase>>>(cluster.get()->createDatabase(LiteralStringRef("DB")));
return ErrorOr<ThreadFuture<Reference<IDatabase>>>(cluster.get()->createDatabase());
}
});
@ -811,7 +832,7 @@ void MultiVersionCluster::Connector::error(const Error& e, int& userParam) {
// TODO: is it right to abandon this connection attempt?
client->failed = true;
MultiVersionApi::api->updateSupportedVersions();
TraceEvent(SevError, "ClusterConnectionError").detail("ClientLibrary", this->client->libPath).error(e);
TraceEvent(SevError, "ClusterConnectionError").error(e).detail("ClientLibrary", this->client->libPath);
}
delref();
@ -850,7 +871,7 @@ void MultiVersionCluster::ClusterState::stateChanged() {
}
catch(Error &e) {
optionLock.leave();
TraceEvent(SevError, "ClusterVersionChangeOptionError").detail("Option", option.first).detail("OptionValue", printable(option.second)).detail("LibPath", clients[newIndex]->libPath).error(e);
TraceEvent(SevError, "ClusterVersionChangeOptionError").error(e).detail("Option", option.first).detail("OptionValue", printable(option.second)).detail("LibPath", clients[newIndex]->libPath);
connectionAttempts[newIndex]->connected = false;
clients[newIndex]->failed = true;
MultiVersionApi::api->updateSupportedVersions();
@ -910,12 +931,13 @@ void MultiVersionApi::runOnExternalClients(std::function<void(Reference<ClientIn
}
}
catch(Error &e) {
TraceEvent(SevWarnAlways, "ExternalClientFailure").detail("LibPath", c->second->libPath).error(e);
if(e.code() == error_code_external_client_already_loaded) {
TraceEvent(SevInfo, "ExternalClientAlreadyLoaded").error(e).detail("LibPath", c->second->libPath);
c = externalClients.erase(c);
continue;
}
else {
TraceEvent(SevWarnAlways, "ExternalClientFailure").error(e).detail("LibPath", c->second->libPath);
c->second->failed = true;
newFailure = true;
}
@ -983,6 +1005,7 @@ void MultiVersionApi::addExternalLibrary(std::string path) {
std::string filename = basename(path);
if(filename.empty() || !fileExists(path)) {
TraceEvent("ExternalClientNotFound").detail("LibraryPath", filename);
throw file_not_found();
}
@ -1049,6 +1072,15 @@ void MultiVersionApi::setNetworkOption(FDBNetworkOptions::Option option, Optiona
}
void MultiVersionApi::setNetworkOptionInternal(FDBNetworkOptions::Option option, Optional<StringRef> value) {
auto itr = FDBNetworkOptions::optionInfo.find(option);
if(itr != FDBNetworkOptions::optionInfo.end()) {
TraceEvent("SetNetworkOption").detail("Option", itr->second.name);
}
else {
TraceEvent("UnknownNetworkOption").detail("Option", option);
throw invalid_option();
}
if(option == FDBNetworkOptions::DISABLE_MULTI_VERSION_CLIENT_API) {
validateOption(value, false, true);
disableMultiVersionClientApi();
@ -1342,7 +1374,7 @@ void MultiVersionApi::loadEnvironmentVariableNetworkOptions() {
}
}
catch(Error &e) {
TraceEvent(SevError, "EnvironmentVariableNetworkOptionFailed").detail("Option", option.second.name).detail("Value", valueStr).error(e);
TraceEvent(SevError, "EnvironmentVariableNetworkOptionFailed").error(e).detail("Option", option.second.name).detail("Value", valueStr);
throw environment_variable_network_option_failed();
}
}
@ -1565,13 +1597,13 @@ ACTOR Future<Void> checkUndestroyedFutures(std::vector<ThreadSingleAssignmentVar
f = undestroyed[fNum];
while(!f->isReady() && start+5 >= now()) {
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
ASSERT(f->isReady());
}
Void _ = wait(delay(1.0));
wait(delay(1.0));
for(fNum = 0; fNum < undestroyed.size(); ++fNum) {
f = undestroyed[fNum];
@ -1674,7 +1706,7 @@ TEST_CASE( "fdbclient/multiversionclient/AbortableSingleAssignmentVar" ) {
g_network->startThread(runSingleAssignmentVarTest<AbortableTest>, (void*)&done);
while(!done) {
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
return Void();
@ -1745,7 +1777,7 @@ TEST_CASE( "fdbclient/multiversionclient/DLSingleAssignmentVar" ) {
g_network->startThread(runSingleAssignmentVarTest<DLTest>, (void*)&done);
while(!done) {
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
done = false;
@ -1753,7 +1785,7 @@ TEST_CASE( "fdbclient/multiversionclient/DLSingleAssignmentVar" ) {
g_network->startThread(runSingleAssignmentVarTest<DLTest>, (void*)&done);
while(!done) {
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
return Void();
@ -1783,7 +1815,7 @@ TEST_CASE( "fdbclient/multiversionclient/MapSingleAssignmentVar" ) {
g_network->startThread(runSingleAssignmentVarTest<MapTest>, (void*)&done);
while(!done) {
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
return Void();
@ -1816,7 +1848,7 @@ TEST_CASE( "fdbclient/multiversionclient/FlatMapSingleAssignmentVar" ) {
g_network->startThread(runSingleAssignmentVarTest<FlatMapTest>, (void*)&done);
while(!done) {
Void _ = wait(delay(1.0));
wait(delay(1.0));
}
return Void();

View File

@ -178,7 +178,7 @@ public:
DLCluster(Reference<FdbCApi> api, FdbCApi::FDBCluster *cluster) : api(api), cluster(cluster) {}
~DLCluster() { api->clusterDestroy(cluster); }
ThreadFuture<Reference<IDatabase>> createDatabase(Standalone<StringRef> dbName);
ThreadFuture<Reference<IDatabase>> createDatabase();
void setOption(FDBClusterOptions::Option option, Optional<StringRef> value = Optional<StringRef>());
void addref() { ThreadSafeReferenceCounted<DLCluster>::addref(); }
@ -278,7 +278,7 @@ class MultiVersionCluster;
class MultiVersionDatabase : public IDatabase, ThreadSafeReferenceCounted<MultiVersionDatabase> {
public:
MultiVersionDatabase(Reference<MultiVersionCluster> cluster, Standalone<StringRef> dbName, Reference<IDatabase> db, ThreadFuture<Void> changed);
MultiVersionDatabase(Reference<MultiVersionCluster> cluster, Reference<IDatabase> db, ThreadFuture<Void> changed);
~MultiVersionDatabase();
Reference<ITransaction> createTransaction();
@ -291,7 +291,7 @@ public:
private:
struct DatabaseState : ThreadCallback, ThreadSafeReferenceCounted<DatabaseState> {
DatabaseState(Reference<MultiVersionCluster> cluster, Standalone<StringRef> dbName, Reference<IDatabase> db, ThreadFuture<Void> changed);
DatabaseState(Reference<MultiVersionCluster> cluster, Reference<IDatabase> db, ThreadFuture<Void> changed);
void updateDatabase();
void cancelCallbacks();
@ -304,7 +304,6 @@ private:
Reference<IDatabase> db;
const Reference<ThreadSafeAsyncVar<Reference<IDatabase>>> dbVar;
const Standalone<StringRef> dbName;
ThreadFuture<Reference<IDatabase>> dbFuture;
ThreadFuture<Void> changed;
@ -343,7 +342,7 @@ public:
MultiVersionCluster(MultiVersionApi *api, std::string clusterFilePath, Reference<ICluster> cluster);
~MultiVersionCluster();
ThreadFuture<Reference<IDatabase>> createDatabase(Standalone<StringRef> dbName);
ThreadFuture<Reference<IDatabase>> createDatabase();
void setOption(FDBClusterOptions::Option option, Optional<StringRef> value = Optional<StringRef>());
void addref() { ThreadSafeReferenceCounted<MultiVersionCluster>::addref(); }

View File

@ -22,7 +22,6 @@
#include "NativeAPI.h"
#include "Atomic.h"
#include "flow/Platform.h"
#include "flow/actorcompiler.h"
#include "flow/ActorCollection.h"
#include "SystemData.h"
#include "fdbrpc/LoadBalance.h"
@ -53,6 +52,7 @@
#include <time.h>
#include "versions.h"
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
extern IRandom* trace_random;
extern const char* getHGVersion();
@ -64,6 +64,12 @@ using std::make_pair;
NetworkOptions networkOptions;
Reference<TLSOptions> tlsOptions;
static void initTLSOptions() {
if (!tlsOptions) {
tlsOptions = Reference<TLSOptions>(new TLSOptions());
}
}
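The lazy initializer above presumably exists so TLS options can be set before setupNetwork() runs; both paths funnel through it, so tlsOptions is created on first use either way. A hedged sketch (the certificate path is illustrative):

// Hypothetical: setting a TLS option before network setup is now safe.
setNetworkOption(FDBNetworkOptions::TLS_CERT_PATH, LiteralStringRef("/etc/fdb/cert.pem"));
setupNetwork();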
static const Key CLIENT_LATENCY_INFO_PREFIX = LiteralStringRef("client_latency/");
static const Key CLIENT_LATENCY_INFO_CTR_PREFIX = LiteralStringRef("client_latency_counter/");
@ -209,7 +215,7 @@ template <> void delref( DatabaseContext* ptr ) { ptr->delref(); }
ACTOR Future<Void> databaseLogger( DatabaseContext *cx ) {
loop {
Void _ = wait( delay( CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, cx->taskID ) );
wait( delay( CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, cx->taskID ) );
TraceEvent("TransactionMetrics")
.detail("ReadVersions", cx->transactionReadVersions)
.detail("LogicalUncachedReads", cx->transactionLogicalReads)
@ -260,12 +266,12 @@ ACTOR static Future<Standalone<StringRef> > getSampleVersionStamp(Transaction *t
Optional<Value> _ = wait(tr->get(LiteralStringRef("\xff/StatusJsonTestKey62793")));
state Future<Standalone<StringRef> > vstamp = tr->getVersionstamp();
tr->makeSelfConflicting();
Void _ = wait(tr->commit());
wait(tr->commit());
Standalone<StringRef> val = wait(vstamp);
return val;
}
catch (Error& e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -290,14 +296,14 @@ ACTOR static Future<Void> transactionInfoCommitActor(Transaction *tr, std::vecto
numCommitBytes += chunk.key.size() + chunk.value.size() - 4; // subtract number of bytes of key that denotes version stamp index
}
tr->atomicOp(clientLatencyAtomicCtr, StringRef((uint8_t*)&numCommitBytes, 8), MutationRef::AddValue);
Void _ = wait(tr->commit());
wait(tr->commit());
return Void();
}
catch (Error& e) {
retryCount++;
if (retryCount == 10)
throw;
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -338,13 +344,13 @@ ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction *tr, int64_t
TraceEvent(SevInfo, "DeletingExcessCntTxnEntries").detail("BytesToBeDeleted", numBytesToDel);
int64_t bytesDel = -numBytesToDel;
tr->atomicOp(clientLatencyAtomicCtr, StringRef((uint8_t*)&bytesDel, 8), MutationRef::AddValue);
Void _ = wait(tr->commit());
wait(tr->commit());
}
if (txInfoSize - numBytesToDel <= clientTxInfoSizeLimit)
return Void();
}
catch (Error& e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -396,7 +402,7 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext *cx) {
try {
while (iter != trChunksQ.end()) {
if (iter->value.size() + iter->key.size() + txBytes > dataSizeLimit) {
Void _ = wait(transactionInfoCommitActor(&tr, &commitQ));
wait(transactionInfoCommitActor(&tr, &commitQ));
tracking_iter = iter;
commitQ.clear();
txBytes = 0;
@ -406,7 +412,7 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext *cx) {
++iter;
}
if (!commitQ.empty()) {
Void _ = wait(transactionInfoCommitActor(&tr, &commitQ));
wait(transactionInfoCommitActor(&tr, &commitQ));
commitQ.clear();
txBytes = 0;
}
@ -431,12 +437,12 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext *cx) {
double clientSamplingProbability = std::isinf(cx->clientInfo->get().clientTxnInfoSampleRate) ? CLIENT_KNOBS->CSI_SAMPLING_PROBABILITY : cx->clientInfo->get().clientTxnInfoSampleRate;
int64_t clientTxnInfoSizeLimit = cx->clientInfo->get().clientTxnInfoSizeLimit == -1 ? CLIENT_KNOBS->CSI_SIZE_LIMIT : cx->clientInfo->get().clientTxnInfoSizeLimit;
if (!trChunksQ.empty() && g_random->random01() < clientSamplingProbability)
Void _ = wait(delExcessClntTxnEntriesActor(&tr, clientTxnInfoSizeLimit));
wait(delExcessClntTxnEntriesActor(&tr, clientTxnInfoSizeLimit));
// tr is destructed because it holds a reference to DatabaseContext, which creates the cycle mentioned above.
// Hence destroy the transaction before sleeping to give the actor a chance to be cleaned up if the Database is destroyed by the user.
tr = Transaction();
Void _ = wait(delay(CLIENT_KNOBS->CSI_STATUS_DELAY));
wait(delay(CLIENT_KNOBS->CSI_STATUS_DELAY));
}
catch (Error& e) {
if (e.code() == error_code_actor_cancelled) {
@ -447,7 +453,7 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext *cx) {
// tr is destructed because it holds a reference to DatabaseContext, which creates the cycle mentioned above.
// Hence destroy the transaction before sleeping to give the actor a chance to be cleaned up if the Database is destroyed by the user.
tr = Transaction();
Void _ = wait(delay(10.0));
wait(delay(10.0));
}
}
}
@ -457,7 +463,7 @@ ACTOR static Future<Void> monitorMasterProxiesChange(Reference<AsyncVar<ClientDB
curProxies = clientDBInfo->get().proxies;
loop{
Void _ = wait(clientDBInfo->onChange());
wait(clientDBInfo->onChange());
if (clientDBInfo->get().proxies != curProxies) {
curProxies = clientDBInfo->get().proxies;
triggerVar->trigger();
@ -468,9 +474,9 @@ ACTOR static Future<Void> monitorMasterProxiesChange(Reference<AsyncVar<ClientDB
DatabaseContext::DatabaseContext(
Reference<AsyncVar<ClientDBInfo>> clientInfo,
Reference<Cluster> cluster, Future<Void> clientInfoMonitor,
Standalone<StringRef> dbName, Standalone<StringRef> dbId,
int taskID, LocalityData clientLocality, bool enableLocalityLoadBalance, bool lockAware )
: clientInfo(clientInfo), masterProxiesChangeTrigger(), cluster(cluster), clientInfoMonitor(clientInfoMonitor), dbName(dbName), dbId(dbId),
Standalone<StringRef> dbId, int taskID, LocalityData clientLocality,
bool enableLocalityLoadBalance, bool lockAware )
: clientInfo(clientInfo), masterProxiesChangeTrigger(), cluster(cluster), clientInfoMonitor(clientInfoMonitor), dbId(dbId),
transactionReadVersions(0), transactionLogicalReads(0), transactionPhysicalReads(0), transactionCommittedMutations(0), transactionCommittedMutationBytes(0), transactionsCommitStarted(0),
transactionsCommitCompleted(0), transactionsTooOld(0), transactionsFutureVersions(0), transactionsNotCommitted(0), transactionsMaybeCommitted(0), transactionsResourceConstrained(0), taskID(taskID),
outstandingWatches(0), maxOutstandingWatches(CLIENT_KNOBS->DEFAULT_MAX_OUTSTANDING_WATCHES), clientLocality(clientLocality), enableLocalityLoadBalance(enableLocalityLoadBalance), lockAware(lockAware),
@ -488,14 +494,11 @@ DatabaseContext::DatabaseContext(
clientStatusUpdater.actor = clientStatusUpdateActor(this);
}
ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Standalone<StringRef> dbName,
Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<ClientDBInfo>> outInfo )
{
ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<ClientDBInfo>> outInfo ) {
try {
loop {
OpenDatabaseRequest req;
req.knownClientInfoID = outInfo->get().id;
req.dbName = dbName;
req.supportedVersions = VectorRef<ClientVersionRef>(req.arena, networkOptions.supportedVersions);
req.traceLogGroup = StringRef(req.arena, networkOptions.traceLogGroup);
@ -514,7 +517,7 @@ ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<Cluster
TraceEvent("ClientInfoChange").detail("ChangeID", ni.id);
outInfo->set(ni);
}
when( Void _ = wait( clusterInterface->onChange() ) ) {
when( wait( clusterInterface->onChange() ) ) {
if(clusterInterface->get().present())
TraceEvent("ClientInfo_CCInterfaceChange").detail("CCID", clusterInterface->get().get().id());
}
@ -522,29 +525,23 @@ ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<Cluster
}
} catch( Error& e ) {
TraceEvent(SevError, "MonitorClientInfoError")
.detail("DBName", printable(dbName))
.error(e)
.detail("ConnectionFile", ccf && ccf->canGetFilename() ? ccf->getFilename() : "")
.detail("ConnectionString", ccf ? ccf->getConnectionString().toString() : "")
.error(e);
.detail("ConnectionString", ccf ? ccf->getConnectionString().toString() : "");
throw;
}
}
Future< Database > DatabaseContext::createDatabase( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<Cluster> cluster, Standalone<StringRef> dbName, LocalityData const& clientLocality ) {
if (dbName != LiteralStringRef("DB")) {
return invalid_database_name(); // we no longer offer multi-database support, so all databases *must* be named this
}
else {
Reference<AsyncVar<ClientDBInfo>> info( new AsyncVar<ClientDBInfo> );
Future<Void> monitor = monitorClientInfo( clusterInterface, dbName, cluster ? cluster->getConnectionFile() : Reference<ClusterConnectionFile>(), info );
Future< Database > DatabaseContext::createDatabase( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<Cluster> cluster, LocalityData const& clientLocality ) {
Reference<AsyncVar<ClientDBInfo>> info( new AsyncVar<ClientDBInfo> );
Future<Void> monitor = monitorClientInfo( clusterInterface, cluster ? cluster->getConnectionFile() : Reference<ClusterConnectionFile>(), info );
return std::move( Database( new DatabaseContext( info, cluster, monitor, dbName, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false ) ) );
}
return std::move( Database( new DatabaseContext( info, cluster, monitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false ) ) );
}
Database DatabaseContext::create( Reference<AsyncVar<ClientDBInfo>> info, Future<Void> dependency, LocalityData clientLocality, bool enableLocalityLoadBalance, int taskID, bool lockAware ) {
return Database( new DatabaseContext( info, Reference<Cluster>(), dependency, LiteralStringRef("DB"), LiteralStringRef(""), taskID, clientLocality, enableLocalityLoadBalance, lockAware ) );
return Database( new DatabaseContext( info, Reference<Cluster>(), dependency, LiteralStringRef(""), taskID, clientLocality, enableLocalityLoadBalance, lockAware ) );
}
DatabaseContext::~DatabaseContext() {
@ -735,8 +732,8 @@ Reference<Cluster> Cluster::createCluster(std::string connFileName, int apiVersi
return Reference<Cluster>(new Cluster( rccf, apiVersion));
}
Future<Database> Cluster::createDatabase( Standalone<StringRef> dbName, LocalityData locality ) {
return DatabaseContext::createDatabase( clusterInterface, Reference<Cluster>::addRef( this ), dbName, locality );
Future<Database> Cluster::createDatabase( LocalityData locality ) {
return DatabaseContext::createDatabase( clusterInterface, Reference<Cluster>::addRef( this ), locality );
}
Future<Void> Cluster::onConnected() {
@ -767,17 +764,21 @@ void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> valu
validateOptionValue(value, true);
std::string optionValue = value.get().toString();
TraceEvent("SetKnob").detail("KnobString", optionValue);
size_t eq = optionValue.find_first_of('=');
if(eq == optionValue.npos) {
TraceEvent(SevWarnAlways, "InvalidKnobString").detail("KnobString", optionValue);
throw invalid_option_value();
}
std::string knob_name = optionValue.substr(0, eq);
std::string knob_value = optionValue.substr(eq+1);
if (!const_cast<FlowKnobs*>(FLOW_KNOBS)->setKnob( knob_name, knob_value ) &&
!const_cast<ClientKnobs*>(CLIENT_KNOBS)->setKnob( knob_name, knob_value ))
std::string knobName = optionValue.substr(0, eq);
std::string knobValue = optionValue.substr(eq+1);
if (!const_cast<FlowKnobs*>(FLOW_KNOBS)->setKnob( knobName, knobValue ) &&
!const_cast<ClientKnobs*>(CLIENT_KNOBS)->setKnob( knobName, knobValue ))
{
fprintf(stderr, "FoundationDB client ignoring unrecognized knob option '%s'\n", knob_name.c_str());
TraceEvent(SevWarnAlways, "UnrecognizedKnob").detail("Knob", knobName.c_str());
fprintf(stderr, "FoundationDB client ignoring unrecognized knob option '%s'\n", knobName.c_str());
}
break;
}
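For context, a hedged sketch of how the knob path above is reached (the knob name is illustrative; this assumes FDBNetworkOptions::KNOB is the option carrying a name=value string):

// Hypothetical call: sets a flow/client knob by name.
setNetworkOption(FDBNetworkOptions::KNOB, LiteralStringRef("min_trace_severity=10"));
// Parsed into knobName = "min_trace_severity", knobValue = "10". An
// unrecognized name now emits an UnrecognizedKnob trace event in addition
// to the stderr warning, but is still ignored rather than fatal.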
@ -786,39 +787,47 @@ void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> valu
break;
case FDBNetworkOptions::TLS_CERT_PATH:
validateOptionValue(value, true);
initTLSOptions();
tlsOptions->set_cert_file( value.get().toString() );
break;
case FDBNetworkOptions::TLS_CERT_BYTES:
initTLSOptions();
tlsOptions->set_cert_data( value.get().toString() );
break;
case FDBNetworkOptions::TLS_CA_PATH:
validateOptionValue(value, true);
initTLSOptions();
tlsOptions->set_ca_file( value.get().toString() );
break;
case FDBNetworkOptions::TLS_CA_BYTES:
validateOptionValue(value, true);
initTLSOptions();
tlsOptions->set_ca_data(value.get().toString());
break;
case FDBNetworkOptions::TLS_PASSWORD:
validateOptionValue(value, true);
initTLSOptions();
tlsOptions->set_key_password(value.get().toString());
break;
case FDBNetworkOptions::TLS_KEY_PATH:
validateOptionValue(value, true);
initTLSOptions();
tlsOptions->set_key_file( value.get().toString() );
break;
case FDBNetworkOptions::TLS_KEY_BYTES:
validateOptionValue(value, true);
initTLSOptions();
tlsOptions->set_key_data( value.get().toString() );
break;
case FDBNetworkOptions::TLS_VERIFY_PEERS:
validateOptionValue(value, true);
initTLSOptions();
try {
tlsOptions->set_verify_peers({ value.get().toString() });
} catch( Error& e ) {
TraceEvent(SevWarnAlways, "TLSValidationSetError")
.detail("Input", value.get().toString() )
.error( e );
.error( e )
.detail("Input", value.get().toString() );
throw invalid_option_value();
}
break;
@ -871,7 +880,7 @@ void setupNetwork(uint64_t transportId, bool useMetrics) {
FlowTransport::createInstance(transportId);
Net2FileSystem::newFileSystem();
tlsOptions = Reference<TLSOptions>( new TLSOptions );
initTLSOptions();
#ifndef TLS_DISABLED
tlsOptions->register_network();
@ -916,7 +925,7 @@ ACTOR Future<Reference<ProxyInfo>> getMasterProxiesFuture(DatabaseContext *cx) {
Reference<ProxyInfo> proxies = cx->getMasterProxies();
if (proxies)
return proxies;
Void _ = wait( cx->onMasterProxiesChanged() );
wait( cx->onMasterProxiesChanged() );
}
}
@ -1040,7 +1049,7 @@ ACTOR Future< pair<KeyRange,Reference<LocationInfo>> > getKeyLocation_internal(
loop {
choose {
when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {}
when ( wait( cx->onMasterProxiesChanged() ) ) {}
when ( GetKeyServerLocationsReply rep = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(key, Optional<KeyRef>(), 100, isBackward, key.arena()), TaskDefaultPromiseEndpoint ) ) ) {
if( info.debugID.present() )
g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.After");
@ -1077,7 +1086,7 @@ ACTOR Future< vector< pair<KeyRange,Reference<LocationInfo>> > > getKeyRangeLoca
loop {
choose {
when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {}
when ( wait( cx->onMasterProxiesChanged() ) ) {}
when ( GetKeyServerLocationsReply _rep = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(keys.begin, keys.end, limit, reverse, keys.arena()), TaskDefaultPromiseEndpoint ) ) ) {
state GetKeyServerLocationsReply rep = _rep;
if( info.debugID.present() )
@ -1089,7 +1098,7 @@ ACTOR Future< vector< pair<KeyRange,Reference<LocationInfo>> > > getKeyRangeLoca
for (; shard < rep.results.size(); shard++) {
//FIXME: these shards are being inserted into the map sequentially; it would be much more CPU efficient to save the map pairs and insert them all at once.
results.push_back( make_pair(rep.results[shard].first & keys, cx->setCachedLocation(rep.results[shard].first, rep.results[shard].second)) );
Void _ = wait(yield());
wait(yield());
}
return results;
@ -1152,7 +1161,7 @@ ACTOR Future<Void> warmRange_impl( Transaction *self, Database cx, KeyRange keys
Version _ = wait( tr.getReadVersion() );
break;
} catch( Error &e ) {
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
}
}
@ -1224,7 +1233,7 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed ||
(e.code() == error_code_transaction_too_old && ver == latestVersion) ) {
cx->invalidateCache( key );
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
} else {
if (trLogInfo)
trLogInfo->addLog(FdbClientLogEvents::EventGetError(startTimeD, static_cast<int>(e.code()), key));
@ -1267,14 +1276,12 @@ ACTOR Future<Key> getKey( Database cx, KeySelector k, Future<Version> version, T
if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed) {
cx->invalidateCache(k.getKey(), k.isBackward());
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
} else {
if(e.code() != error_code_actor_cancelled) {
TraceEvent(SevInfo, "GetKeyError")
.error(e)
.detail("AtKey", printable(k.getKey()))
.detail("Offset", k.offset);
}
TraceEvent(SevInfo, "GetKeyError")
.error(e)
.detail("AtKey", printable(k.getKey()))
.detail("Offset", k.offset);
throw e;
}
}
@ -1285,12 +1292,12 @@ ACTOR Future<Version> waitForCommittedVersion( Database cx, Version version ) {
try {
loop {
choose {
when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {}
when ( wait( cx->onMasterProxiesChanged() ) ) {}
when ( GetReadVersionReply v = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getConsistentReadVersion, GetReadVersionRequest( 0, GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE ), cx->taskID ) ) ) {
if (v.version >= version)
return v.version;
// SOMEDAY: Do the wait on the server side, possibly use less expensive source of committed version (causal consistency is not needed for this purpose)
Void _ = wait( delay( CLIENT_KNOBS->FUTURE_VERSION_RETRY_DELAY, cx->taskID ) );
wait( delay( CLIENT_KNOBS->FUTURE_VERSION_RETRY_DELAY, cx->taskID ) );
}
}
}
@ -1336,16 +1343,16 @@ ACTOR Future< Void > watchValue( Future<Version> version, Key key, Optional<Valu
} catch (Error& e) {
if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed) {
cx->invalidateCache( key );
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
} else if( e.code() == error_code_watch_cancelled ) {
TEST( true ); // Too many watches on the storage server, poll for changes instead
Void _ = wait(delay(CLIENT_KNOBS->WATCH_POLLING_TIME, info.taskID));
wait(delay(CLIENT_KNOBS->WATCH_POLLING_TIME, info.taskID));
} else if ( e.code() == error_code_timed_out ) { //The storage server occasionally times out watches in case it was cancelled
TEST( true ); // A watch timed out
Void _ = wait(delay(CLIENT_KNOBS->FUTURE_VERSION_RETRY_DELAY, info.taskID));
wait(delay(CLIENT_KNOBS->FUTURE_VERSION_RETRY_DELAY, info.taskID));
} else {
state Error err = e;
Void _ = wait(delay(CLIENT_KNOBS->FUTURE_VERSION_RETRY_DELAY, info.taskID));
wait(delay(CLIENT_KNOBS->FUTURE_VERSION_RETRY_DELAY, info.taskID));
throw err;
}
}
@ -1486,7 +1493,7 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
keys = KeyRangeRef( range.begin, keys.end );
cx->invalidateCache( keys );
Void _ = wait( delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID ));
wait( delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID ));
break;
} else {
TraceEvent(SevInfo, "GetExactRangeError")
@ -1786,7 +1793,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
return result;
}
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
} else {
if (trLogInfo)
trLogInfo->addLog(FdbClientLogEvents::EventGetRangeError(startTime, static_cast<int>(e.code()), begin.getKey(), end.getKey()));
@ -1894,13 +1901,13 @@ ACTOR Future<Void> watch( Reference<Watch> watch, Database cx, Transaction *self
choose {
// RYOW write to value that is being watched (if applicable)
// Errors
when(Void _ = wait(watch->onChangeTrigger.getFuture())) { }
when(wait(watch->onChangeTrigger.getFuture())) { }
// NativeAPI finished commit and updated watchFuture
when(Void _ = wait(watch->onSetWatchTrigger.getFuture())) {
when(wait(watch->onSetWatchTrigger.getFuture())) {
// NativeAPI watchValue future finishes or errors
Void _ = wait(watch->watchFuture);
wait(watch->watchFuture);
}
}
}
@ -2250,7 +2257,7 @@ ACTOR void checkWrites( Database cx, Future<Void> committed, Promise<Void> outCo
{
state Version version;
try {
Void _ = wait( committed );
wait( committed );
// If the commit is successful, by definition the transaction still exists for now. Grab the version, and don't use it again.
version = checkTr->getCommittedVersion();
outCommitted.send(Void());
@ -2259,7 +2266,7 @@ ACTOR void checkWrites( Database cx, Future<Void> committed, Promise<Void> outCo
return;
}
Void _ = wait( delay( g_random->random01() ) ); // delay between 0 and 1 seconds
wait( delay( g_random->random01() ) ); // delay between 0 and 1 seconds
//Future<Optional<Version>> version, Database cx, CommitTransactionRequest req ) {
state KeyRangeMap<MutationBlock> expectedValues;
@ -2329,11 +2336,11 @@ ACTOR static Future<Void> commitDummyTransaction( Database cx, KeyRange range, T
tr.setOption( FDBTransactionOptions::LOCK_AWARE );
tr.addReadConflictRange(range);
tr.addWriteConflictRange(range);
Void _ = wait( tr.commit() );
wait( tr.commit() );
return Void();
} catch (Error& e) {
TraceEvent("CommitDummyTransactionError").error(e,true).detail("Key", printable(range.begin)).detail("Retries", retries);
Void _ = wait( tr.onError(e) );
wait( tr.onError(e) );
}
++retries;
}
@ -2390,7 +2397,7 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
}
choose {
when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {
when ( wait( cx->onMasterProxiesChanged() ) ) {
reply.cancel();
throw request_maybe_delivered();
}
@ -2446,7 +2453,7 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
TEST(true); // Waiting for dummy transaction to report commit_unknown_result
Void _ = wait( commitDummyTransaction( cx, singleKeyRange(selfConflictingRange.begin), info, tr->options ) );
wait( commitDummyTransaction( cx, singleKeyRange(selfConflictingRange.begin), info, tr->options ) );
}
// The user needs to be informed that we aren't sure whether the commit happened. Standard retry loops retry it anyway (relying on transaction idempotence), but a client might do something else.
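A hedged sketch of what that contract means for a caller whose transaction is not idempotent (fragment inside an ACTOR; shape only):

// Hypothetical: surface commit_unknown_result instead of blindly retrying.
try {
	wait( tr.commit() );
} catch( Error& e ) {
	if( e.code() == error_code_commit_unknown_result )
		throw;              // the commit may or may not have happened
	wait( tr.onError(e) );  // other retryable errors retry as usual
}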
@ -2483,11 +2490,11 @@ Future<Void> Transaction::commitMutations() {
size_t transactionSize = tr.transaction.mutations.expectedSize() + tr.transaction.read_conflict_ranges.expectedSize() + tr.transaction.write_conflict_ranges.expectedSize();
if (transactionSize > (uint64_t)FLOW_KNOBS->PACKET_WARNING) {
TraceEvent(!g_network->isSimulated() ? SevWarnAlways : SevWarn, "LargeTransaction")
.suppressFor(1.0)
.detail("Size", transactionSize)
.detail("NumMutations", tr.transaction.mutations.size())
.detail("ReadConflictSize", tr.transaction.read_conflict_ranges.expectedSize())
.detail("WriteConflictSize", tr.transaction.write_conflict_ranges.expectedSize())
.suppressFor(1.0);
.detail("WriteConflictSize", tr.transaction.write_conflict_ranges.expectedSize());
}
if(!apiVersionAtLeast(300)) {
@ -2547,7 +2554,7 @@ Future<Void> Transaction::commitMutations() {
ACTOR Future<Void> commitAndWatch(Transaction *self) {
try {
Void _ = wait(self->commitMutations());
wait(self->commitMutations());
if(!self->watches.empty()) {
self->setupWatches();
@ -2683,7 +2690,7 @@ ACTOR Future<GetReadVersionReply> getConsistentReadVersion( DatabaseContext *cx,
loop {
state GetReadVersionRequest req( transactionCount, flags, debugID );
choose {
when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {}
when ( wait( cx->onMasterProxiesChanged() ) ) {}
when ( GetReadVersionReply v = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getConsistentReadVersion, req, cx->taskID ) ) ) {
if( debugID.present() )
g_traceBatch.addEvent("TransactionDebug", debugID.get().first(), "NativeAPI.getConsistentReadVersion.After");
@ -2693,7 +2700,7 @@ ACTOR Future<GetReadVersionReply> getConsistentReadVersion( DatabaseContext *cx,
}
}
} catch (Error& e) {
if( e.code() != error_code_broken_promise && e.code() != error_code_actor_cancelled )
if( e.code() != error_code_broken_promise )
TraceEvent(SevError, "GetConsistentReadVersionError").error(e);
throw;
}
@ -2728,7 +2735,7 @@ ACTOR Future<Void> readVersionBatcher( DatabaseContext *cx, FutureStream< std::p
else if (!timeout.isValid())
timeout = delay(batchTime, cx->taskID);
}
when(Void _ = wait(timeout.isValid() ? timeout : Never())) {
when(wait(timeout.isValid() ? timeout : Never())) {
send_batch = true;
}
// dynamic batching monitors reply latencies
@ -2736,7 +2743,7 @@ ACTOR Future<Void> readVersionBatcher( DatabaseContext *cx, FutureStream< std::p
double target_latency = reply_latency * 0.5;
batchTime = min(0.1 * target_latency + 0.9 * batchTime, CLIENT_KNOBS->GRV_BATCH_TIMEOUT);
}
when(Void _ = wait(collection)){} // for errors
when(wait(collection)){} // for errors
}
if (send_batch) {
int count = requests.size();
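Worked example of the smoothing above: with batchTime at 5 ms and a measured reply_latency of 20 ms, target_latency is 10 ms, so the new batchTime is 0.1 * 10 + 0.9 * 5 = 5.5 ms. The batch window drifts toward half the observed GRV reply latency, capped at CLIENT_KNOBS->GRV_BATCH_TIMEOUT.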
@ -2876,7 +2883,7 @@ ACTOR Future< StorageMetrics > waitStorageMetricsMultipleLocations(
req.max.bytes = -1;
fx[i] = loadBalance( locations[i].second, &StorageServerInterface::waitMetrics, req, TaskDataDistribution );
}
Void _ = wait( waitForAll(fx) );
wait( waitForAll(fx) );
// invariant: true total is between (total-permittedError/2, total+permittedError/2)
for(int i=0; i<nLocs; i++)
@ -2914,7 +2921,7 @@ ACTOR Future< StorageMetrics > waitStorageMetrics(
.detail("Locations", locations.size())
.detail("Limit", CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT)
.detail("JitteredSecondsOfPenitence", CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY);
Void _ = wait(delayJittered(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
wait(delayJittered(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
// make sure that the next getKeyRangeLocations() call will actually re-fetch the range
cx->invalidateCache( keys );
} else {
@ -2935,7 +2942,7 @@ ACTOR Future< StorageMetrics > waitStorageMetrics(
throw;
}
cx->invalidateCache(keys);
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskDataDistribution));
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskDataDistribution));
}
}
}
@ -2966,7 +2973,7 @@ ACTOR Future< Standalone<VectorRef<KeyRef>> > splitStorageMetrics( Database cx,
//SOMEDAY: Right now, if there are too many shards we delay and check again later. There may be a better solution to this.
if(locations.size() == CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT) {
Void _ = wait(delay(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
wait(delay(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
cx->invalidateCache(keys);
}
else {
@ -3003,7 +3010,7 @@ ACTOR Future< Standalone<VectorRef<KeyRef>> > splitStorageMetrics( Database cx,
throw;
}
cx->invalidateCache( keys );
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskDataDistribution));
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskDataDistribution));
}
}
}

View File

@ -110,7 +110,7 @@ public:
static Reference<Cluster> createCluster(std::string connFileName, int apiVersion);
// See DatabaseContext::createDatabase
Future<Database> createDatabase( Standalone<StringRef> dbName, LocalityData locality = LocalityData() );
Future<Database> createDatabase( LocalityData locality = LocalityData() );
void setOption(FDBClusterOptions::Option option, Optional<StringRef> value);

View File

@ -18,12 +18,13 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include "ReadYourWrites.h"
#include "Atomic.h"
#include "DatabaseContext.h"
#include "StatusClient.h"
#include "MonitorLeader.h"
#include "flow/Util.h"
#include "flow/actorcompiler.h" // This must be the last #include.
class RYWImpl {
public:
@ -245,7 +246,7 @@ public:
when (typename Req::Result result = wait( readThrough( ryw, req, snapshot ) )) {
return result;
}
when (Void _ = wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
when (wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
}
}
ACTOR template <class Req> static Future<typename Req::Result> readWithConflictRangeSnapshot( ReadYourWritesTransaction* ryw, Req req ) {
@ -254,7 +255,7 @@ public:
when (typename Req::Result result = wait( read( ryw, req, &it ) )) {
return result;
}
when (Void _ = wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
when (wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
}
}
ACTOR template <class Req> static Future<typename Req::Result> readWithConflictRangeRYW( ReadYourWritesTransaction* ryw, Req req, bool snapshot ) {
@ -266,7 +267,7 @@ public:
addConflictRange( ryw, req, it.extractWriteMapIterator(), result );
return result;
}
when (Void _ = wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
when (wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
}
}
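All three read paths here race the read against the transaction's reset signal: whichever future fires first wins, and a fired resetPromise means the transaction was reset out from under the read. The idiom in isolation (Result and doRead() are hypothetical stand-ins for the request types above):

choose {
    when(Result result = wait(doRead())) {
        return result;                      // normal completion
    }
    when(wait(ryw->resetPromise.getFuture())) {
        throw internal_error();             // transaction reset mid-read
    }
}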
template <class Req> static inline Future<typename Req::Result> readWithConflictRange( ReadYourWritesTransaction* ryw, Req const& req, bool snapshot ) {
@ -936,16 +937,12 @@ public:
for( int i = 0; i < itCopy->value.size(); i++ ) {
if(itCopy->value[i]->onChangeTrigger.isSet()) {
if( i < itCopy->value.size() - 1 )
std::swap(itCopy->value[i--], itCopy->value.back());
itCopy->value.pop_back();
swapAndPop(&itCopy->value, i--);
} else if( !valueKnown ||
(itCopy->value[i]->setPresent && (itCopy->value[i]->setValue.present() != val.present() || (val.present() && itCopy->value[i]->setValue.get() != val.get()))) ||
(itCopy->value[i]->valuePresent && (itCopy->value[i]->value.present() != val.present() || (val.present() && itCopy->value[i]->value.get() != val.get()))) ) {
itCopy->value[i]->onChangeTrigger.send(Void());
if( i < itCopy->value.size() - 1 )
std::swap(itCopy->value[i--], itCopy->value.back());
itCopy->value.pop_back();
swapAndPop(&itCopy->value, i--);
} else {
itCopy->value[i]->setPresent = true;
itCopy->value[i]->setValue = val.cast_to<Value>();
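The two swap/pop_back sequences above are folded into swapAndPop from flow/Util.h. Its presumed shape, inferred from the code it replaces rather than quoted from the header, is an O(1) unordered erase:

// Move the last element into slot i, then shrink the vector by one.
template <class V>
void swapAndPop(V* vec, int i) {
    if (i != (int)vec->size() - 1)
        std::swap((*vec)[i], vec->back());
    vec->pop_back();
}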
@ -977,7 +974,7 @@ public:
val = ryw->tr.get(key);
try {
Void _ = wait(ryw->resetPromise.getFuture() || success(val) || watch->onChangeTrigger.getFuture());
wait(ryw->resetPromise.getFuture() || success(val) || watch->onChangeTrigger.getFuture());
} catch( Error &e ) {
done.send(Void());
throw;
@ -1002,7 +999,7 @@ public:
watchFuture = ryw->tr.watch(watch); // throws if there are too many outstanding watches
done.send(Void());
Void _ = wait(watchFuture);
wait(watchFuture);
return Void();
}
@ -1012,12 +1009,12 @@ public:
ryw->commitStarted = true;
Future<Void> ready = ryw->reading;
Void _ = wait( ryw->resetPromise.getFuture() || ready );
wait( ryw->resetPromise.getFuture() || ready );
if( ryw->options.readYourWritesDisabled ) {
if (ryw->resetPromise.isSet())
throw ryw->resetPromise.getFuture().getError();
Void _ = wait( ryw->resetPromise.getFuture() || ryw->tr.commit() );
wait( ryw->resetPromise.getFuture() || ryw->tr.commit() );
ryw->debugLogRetries();
@ -1037,7 +1034,7 @@ public:
}
}
Void _ = wait( ryw->resetPromise.getFuture() || ryw->tr.commit() );
wait( ryw->resetPromise.getFuture() || ryw->tr.commit() );
ryw->debugLogRetries();
if(!ryw->tr.apiVersionAtLeast(410)) {
@ -1071,7 +1068,7 @@ public:
throw e;
}
Void _ = wait( ryw->resetPromise.getFuture() || ryw->tr.onError(e) );
wait( ryw->resetPromise.getFuture() || ryw->tr.onError(e) );
ryw->debugLogRetries(e);
@ -1093,7 +1090,7 @@ public:
return v;
}
when(Void _ = wait(ryw->resetPromise.getFuture())) {
when(wait(ryw->resetPromise.getFuture())) {
throw internal_error();
}
}
@ -1104,10 +1101,10 @@ ReadYourWritesTransaction::ReadYourWritesTransaction( Database const& cx ) : cac
ACTOR Future<Void> timebomb(double totalSeconds, Promise<Void> resetPromise) {
if(totalSeconds == 0.0) {
Void _ = wait ( Never() );
wait ( Never() );
}
else if (now() < totalSeconds) {
Void _ = wait ( delayUntil( totalSeconds ) );
wait ( delayUntil( totalSeconds ) );
}
if( !resetPromise.isSet() )
resetPromise.sendError(transaction_timed_out());
@ -1153,7 +1150,7 @@ ACTOR Future<Standalone<RangeResultRef>> getWorkerInterfaces (Reference<ClusterC
return result;
}
when( Void _ = wait(clusterInterface->onChange()) ) {}
when( wait(clusterInterface->onChange()) ) {}
}
}
}
@ -1905,12 +1902,11 @@ void ReadYourWritesTransaction::debugLogRetries(Optional<Error> error) {
fprintf(stderr, "fdb WARNING: long transaction (%.2fs elapsed%s, %d retries, %s)\n", elapsed, transactionNameStr.c_str(), retries, committed ? "committed" : error.get().what());
{
TraceEvent trace = TraceEvent("LongTransaction");
if(!transactionDebugInfo->transactionName.empty())
trace.detail("TransactionName", printable(StringRef(transactionDebugInfo->transactionName)));
trace.detail("Elapsed", elapsed).detail("Retries", retries).detail("Committed", committed);
if(error.present())
trace.error(error.get(), true);
if(!transactionDebugInfo->transactionName.empty())
trace.detail("TransactionName", printable(StringRef(transactionDebugInfo->transactionName)));
trace.detail("Elapsed", elapsed).detail("Retries", retries).detail("Committed", committed);
}
transactionDebugInfo->lastRetryLogTime = now();
}
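The LongTransaction rewrite follows an ordering convention applied throughout this commit: error() and suppressFor() are attached to a TraceEvent before any detail() calls. Illustrative shape only, with placeholder field names:

TraceEvent(SevWarn, "SomeEvent", uid)
    .error(e)             // classify the event first
    .suppressFor(1.0)     // then make the rate-limiting decision
    .detail("Key", key);  // details come last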

View File

@ -29,6 +29,7 @@
#include "flow/flow.h"
#include "ReadYourWrites.h"
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR template < class Function >
Future<decltype(fake<Function>()(Reference<ReadYourWritesTransaction>()).getValue())>
@ -37,11 +38,11 @@ runRYWTransaction(Database cx, Function func) {
loop{
try {
state decltype( fake<Function>()( Reference<ReadYourWritesTransaction>() ).getValue()) result = wait(func(tr));
Void _ = wait(tr->commit());
wait(tr->commit());
return result;
}
catch (Error& e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
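For reference, a hypothetical call site for runRYWTransaction: the lambda runs against a fresh ReadYourWritesTransaction, and the wrapper commits on success and retries via onError on retryable failures. The option and key below are illustrative only:

Future<Optional<Value>> fv = runRYWTransaction(cx,
    [=](Reference<ReadYourWritesTransaction> tr) {
        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        return tr->get(LiteralStringRef("\xff/someKey"));
    });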
@ -53,13 +54,13 @@ runRYWTransactionFailIfLocked(Database cx, Function func) {
loop{
try {
state decltype( fake<Function>()( Reference<ReadYourWritesTransaction>() ).getValue()) result = wait(func(tr));
Void _ = wait(tr->commit());
wait(tr->commit());
return result;
}
catch (Error& e) {
if(e.code() == error_code_database_locked)
throw;
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -69,7 +70,9 @@ Future<decltype(fake<Function>()(Reference<ReadYourWritesTransaction>()).getValu
runRYWTransactionNoRetry(Database cx, Function func) {
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
state decltype(fake<Function>()(Reference<ReadYourWritesTransaction>()).getValue()) result = wait(func(tr));
Void _ = wait(tr->commit());
wait(tr->commit());
return result;
}
#endif
#include "flow/unactorcompiler.h"
#endif

637
fdbclient/Schemas.cpp Normal file
View File

@ -0,0 +1,637 @@
/*
* Schemas.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Schemas.h"
const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
{
"cluster":{
"layers":{
"_valid":true,
"_error":"some error description"
},
"processes":{
"$map":{
"version":"3.0.0",
"machine_id":"0ccb4e0feddb5583010f6b77d9d10ece",
"locality":{
"$map":"value"
},
"class_source":{
"$enum":[
"command_line",
"configure_auto",
"set_class"
]
},
"class_type":{
"$enum":[
"unset",
"storage",
"transaction",
"resolution",
"proxy",
"master",
"test"
]
},
"roles":[
{
"query_queue_max":0,
"input_bytes":{
"hz":0.0,
"counter":0,
"roughness":0.0
},
"stored_bytes":12341234,
"kvstore_used_bytes":12341234,
"kvstore_available_bytes":12341234,
"kvstore_free_bytes":12341234,
"kvstore_total_bytes":12341234,
"durable_bytes":{
"hz":0.0,
"counter":0,
"roughness":0.0
},
"queue_disk_used_bytes":12341234,
"queue_disk_available_bytes":12341234,
"queue_disk_free_bytes":12341234,
"queue_disk_total_bytes":12341234,
"role":{
"$enum":[
"master",
"proxy",
"log",
"storage",
"resolver",
"cluster_controller"
]
},
"data_version":12341234,
"data_lag": {
"seconds":5.0,
"versions":12341234
},
"id":"eb84471d68c12d1d26f692a50000003f",
"finished_queries":{
"hz":0.0,
"counter":0,
"roughness":0.0
}
}
],
"command_line":"-r simulation",
"memory":{
"available_bytes":0,
"limit_bytes":0,
"unused_allocated_memory":0,
"used_bytes":0
},
"messages":[
{
"time":12345.12312,
"type":"x",
"name":{
"$enum":[
"file_open_error",
"incorrect_cluster_file_contents",
"process_error",
"io_error",
"io_timeout",
"platform_error",
"storage_server_lagging",
"(other FDB error messages)"
]
},
"raw_log_message":"<stuff/>",
"description":"abc"
}
],
"fault_domain":"0ccb4e0fdbdb5583010f6b77d9d10ece",
"excluded":false,
"address":"1.2.3.4:1234",
"disk":{
"free_bytes":3451233456234,
"reads":{
"hz":0.0,
"counter":0,
"sectors":0
},
"busy":0.0,
"writes":{
"hz":0.0,
"counter":0,
"sectors":0
},
"total_bytes":123412341234
},
"uptime_seconds":1234.2345,
"cpu":{
"usage_cores":0.0
},
"network":{
"current_connections":0,
"connections_established":{
"hz":0.0
},
"connections_closed":{
"hz":0.0
},
"connection_errors":{
"hz":0.0
},
"megabits_sent":{
"hz":0.0
},
"megabits_received":{
"hz":0.0
}
}
}
},
"old_logs":[
{
"logs":[
{
"id":"7f8d623d0cb9966e",
"healthy":true,
"address":"1.2.3.4:1234"
}
],
"log_replication_factor":3,
"log_write_anti_quorum":0,
"log_fault_tolerance":2,
"remote_log_replication_factor":3,
"remote_log_fault_tolerance":2,
"satellite_log_replication_factor":3,
"satellite_log_write_anti_quorum":0,
"satellite_log_fault_tolerance":2
}
],
"fault_tolerance":{
"max_machine_failures_without_losing_availability":0,
"max_machine_failures_without_losing_data":0
},
"qos":{
"worst_queue_bytes_log_server":460,
"performance_limited_by":{
"reason_server_id":"7f8d623d0cb9966e",
"reason_id":0,
"name":{
"$enum":[
"workload",
"storage_server_write_queue_size",
"storage_server_write_bandwidth_mvcc",
"storage_server_readable_behind",
"log_server_mvcc_write_bandwidth",
"log_server_write_queue",
"storage_server_min_free_space",
"storage_server_min_free_space_ratio",
"log_server_min_free_space",
"log_server_min_free_space_ratio"
]
},
"description":"The database is not being saturated by the workload."
},
"transactions_per_second_limit":0,
"released_transactions_per_second":0,
"limiting_queue_bytes_storage_server":0,
"worst_queue_bytes_storage_server":0,
"limiting_version_lag_storage_server":0,
"worst_version_lag_storage_server":0
},
"incompatible_connections":[
],
"datacenter_version_difference":0,
"database_available":true,
"database_locked":false,
"generation":2,
"latency_probe":{
"read_seconds":7,
"immediate_priority_transaction_start_seconds":0.0,
"batch_priority_transaction_start_seconds":0.0,
"transaction_start_seconds":0.0,
"commit_seconds":0.02
},
"clients":{
"count":1,
"supported_versions":[
{
"client_version":"3.0.0",
"connected_clients":[
{
"address":"127.0.0.1:9898",
"log_group":"default"
}
],
"count" : 1,
"protocol_version" : "fdb00a400050001",
"source_version" : "9430e1127b4991cbc5ab2b17f41cfffa5de07e9d"
}
]
},
"messages":[
{
"reasons":[
{
"description":"Blah."
}
],
"unreachable_processes":[
{
"address":"1.2.3.4:1234"
}
],
"name":{
"$enum":[
"unreachable_master_worker",
"unreadable_configuration",
"full_replication_timeout",
"client_issues",
"unreachable_processes",
"immediate_priority_transaction_start_probe_timeout",
"batch_priority_transaction_start_probe_timeout",
"transaction_start_probe_timeout",
"read_probe_timeout",
"commit_probe_timeout",
"storage_servers_error",
"status_incomplete",
"layer_status_incomplete",
"database_availability_timeout"
]
},
"issues":[
{
"name":{
"$enum":[
"incorrect_cluster_file_contents"
]
},
"description":"Cluster file contents do not match current cluster connection string. Verify cluster file is writable and has not been overwritten externally."
}
],
"description":"abc"
}
],
)statusSchema" R"statusSchema(
"recovery_state":{
"required_resolvers":1,
"required_proxies":1,
"name":{
"$enum":[
"reading_coordinated_state",
"locking_coordinated_state",
"locking_old_transaction_servers",
"reading_transaction_system_state",
"configuration_missing",
"configuration_never_created",
"configuration_invalid",
"recruiting_transaction_servers",
"initializing_transaction_servers",
"recovery_transaction",
"writing_coordinated_state",
"accepting_commits",
"all_logs_recruited",
"storage_recovered",
"fully_recovered"
]
},
"required_logs":3,
"missing_logs":"7f8d623d0cb9966e",
"description":"Recovery complete."
},
"workload":{
"operations":{
"writes":{
"hz":0.0,
"counter":0,
"roughness":0.0
},
"reads":{
"hz":0.0,
"counter":0,
"roughness":0.0
}
},
"bytes":{
"written":{
"hz":0.0,
"counter":0,
"roughness":0.0
},
"read":{
"hz":0.0,
"counter":0,
"roughness":0.0
}
},
"keys":{
"read":{
"hz":0.0,
"counter":0,
"roughness":0.0
}
},
"transactions":{
"started":{
"hz":0.0,
"counter":0,
"roughness":0.0
},
"conflicted":{
"hz":0.0,
"counter":0,
"roughness":0.0
},
"committed":{
"hz":0.0,
"counter":0,
"roughness":0.0
}
}
},
"cluster_controller_timestamp":1415650089,
"protocol_version":"fdb00a400050001",
"connection_string":"a:a@127.0.0.1:4000",
"full_replication":true,
"configuration":{
"log_anti_quorum":0,
"log_replicas":2,
"log_replication_policy":"(zoneid^3x1)",
"redundancy_mode":{
"$enum":[
"single",
"double",
"triple",
"three_datacenter",
"three_datacenter_fallback",
"three_data_hall"
]},
"regions":[{
"datacenters":[{
"id":"mr",
"priority":1,
"satellite":1
}],
"satellite_redundancy_mode":{
"$enum":[
"one_satellite_single",
"one_satellite_double",
"one_satellite_triple",
"two_satellite_safe",
"two_satellite_fast"
]},
"satellite_log_replicas":1,
"satellite_usable_dcs":1,
"satellite_anti_quorum":0,
"satellite_log_policy":"(zoneid^3x1)",
"satellite_logs":2
}],
"remote_redundancy_mode":{
"$enum":[
"remote_default",
"remote_single",
"remote_double",
"remote_triple",
"remote_three_data_hall"
]},
"remote_log_replicas":3,
"remote_logs":5,
"log_routers":10,
"usable_regions":1,
"repopulate_anti_quorum":1,
"storage_replicas":1,
"resolvers":1,
"storage_replication_policy":"(zoneid^3x1)",
"logs":2,
"storage_engine":{
"$enum":[
"ssd",
"ssd-1",
"ssd-2",
"memory"
]},
"coordinators_count":1,
"excluded_servers":[
{
"address":"10.0.4.1"
}
],
"auto_proxies":3,
"auto_resolvers":1,
"auto_logs":3,
"proxies":5
},
"data":{
"least_operating_space_bytes_log_server":0,
"average_partition_size_bytes":0,
"state":{
"healthy":true,
"min_replicas_remaining":0,
"name":{
"$enum":[
"initializing",
"missing_data",
"healing",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",
"healthy"
]
},
"description":""
},
"least_operating_space_ratio_storage_server":0.1,
"max_machine_failures_without_losing_availability":0,
"total_disk_used_bytes":0,
"total_kv_size_bytes":0,
"partitions_count":2,
"moving_data":{
"total_written_bytes":0,
"in_flight_bytes":0,
"in_queue_bytes":0,
"highest_priority":0
},
"team_trackers":[
{
"primary":true,
"in_flight_bytes":0,
"unhealthy_servers":0,
"state":{
"healthy":true,
"min_replicas_remaining":0,
"name":{
"$enum":[
"initializing",
"missing_data",
"healing",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",
"healthy"
]
},
"description":""
}
}
],
"least_operating_space_bytes_storage_server":0,
"max_machine_failures_without_losing_data":0
},
"machines":{
"$map":{
"network":{
"megabits_sent":{
"hz":0.0
},
"megabits_received":{
"hz":0.0
},
"tcp_segments_retransmitted":{
"hz":0.0
}
},
"memory":{
"free_bytes":0,
"committed_bytes":0,
"total_bytes":0
},
"contributing_workers":4,
"datacenter_id":"6344abf1813eb05b",
"excluded":false,
"address":"1.2.3.4",
"machine_id":"6344abf1813eb05b",
"locality":{
"$map":"value"
},
"cpu":{
"logical_core_utilization":0.4
}
}
}
},
"client":{
"coordinators":{
"coordinators":[
{
"reachable":true,
"address":"127.0.0.1:4701"
}
],
"quorum_reachable":true
},
"database_status":{
"available":true,
"healthy":true
},
"messages":[
{
"name":{
"$enum":[
"inconsistent_cluster_file",
"unreachable_cluster_controller",
"no_cluster_controller",
"status_incomplete_client",
"status_incomplete_coordinators",
"status_incomplete_error",
"status_incomplete_timeout",
"status_incomplete_cluster",
"quorum_not_reachable"
]
},
"description":"The cluster file is not up to date."
}
],
"timestamp":1415650089,
"cluster_file":{
"path":"/etc/foundationdb/fdb.cluster",
"up_to_date":true
}
}
})statusSchema");
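The schema above is itself JSON, interpreted with two meta-conventions: "$enum" lists the permitted literal values at a path, and "$map" gives one sub-schema that every value of an arbitrarily-keyed object must match. An informal illustration of a conforming status fragment (the keys are hypothetical):

// "processes" is declared with "$map", so a conforming document reads:
//
//   "processes": {
//     "0ccb4e0f...": { "version": "3.0.0", ... },   // any number of keys;
//     "6344abf1...": { "version": "3.0.0", ... }    // each value matches the sub-schema
//   }
//
// whereas a "$enum" field such as "class_type" must hold one of the listed
// strings ("unset", "storage", "transaction", ...).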
const KeyRef JSONSchemas::configurationSchema = LiteralStringRef(R"configSchema(
{
"create":{
"$enum":[
"new"
]},
"log_anti_quorum":0,
"log_replicas":2,
"log_replication_policy":"(zoneid^3x1)",
"redundancy_mode":{
"$enum":[
"single",
"double",
"triple",
"three_datacenter",
"three_datacenter_fallback",
"three_data_hall"
]},
"regions":[{
"datacenters":[{
"id":"mr",
"priority":1,
"satellite":1
}],
"satellite_redundancy_mode":{
"$enum":[
"one_satellite_single",
"one_satellite_double",
"one_satellite_triple",
"two_satellite_safe",
"two_satellite_fast"
]},
"satellite_log_replicas":1,
"satellite_usable_dcs":1,
"satellite_anti_quorum":0,
"satellite_log_policy":"(zoneid^3x1)",
"satellite_logs":2
}],
"remote_redundancy_mode":{
"$enum":[
"remote_default",
"remote_single",
"remote_double",
"remote_triple",
"remote_three_data_hall"
]},
"remote_log_replicas":3,
"remote_logs":5,
"log_routers":10,
"usable_regions":1,
"repopulate_anti_quorum":1,
"storage_replicas":1,
"resolvers":1,
"storage_replication_policy":"(zoneid^3x1)",
"logs":2,
"storage_engine":{
"$enum":[
"ssd",
"ssd-1",
"ssd-2",
"memory"
]},
"auto_proxies":3,
"auto_resolvers":1,
"auto_logs":3,
"proxies":5
})configSchema");

34
fdbclient/Schemas.h Normal file
View File

@ -0,0 +1,34 @@
/*
* Schemas.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBCLIENT_SCHEMAS_H
#define FDBCLIENT_SCHEMAS_H
#pragma once
#include "flow/flow.h"
#include "FDBTypes.h"
struct JSONSchemas {
static const KeyRef statusSchema;
static const KeyRef configurationSchema;
};
#endif /* FDBCLIENT_SCHEMAS_H */

View File

@ -23,6 +23,11 @@
#include "../fdbrpc/JSONDoc.h"
// Reads the entire string s as a JSON value
// Throws if no value can be parsed or if s contains data after the first JSON value
// Trailing whitespace in s is allowed
json_spirit::mValue readJSONStrictly(const std::string &s);
struct StatusObject : json_spirit::mObject {
typedef json_spirit::mObject Map;
typedef json_spirit::mArray Array;
@ -73,7 +78,7 @@ static StatusObject makeMessage(const char *name, const char *description) {
// Typedef to cover older code that was written when this class was only a reader and called StatusObjectReader
typedef JSONDoc StatusObjectReader;
// Template specialization for get<JSONDoc> because it is convenient to get() an
// element from an object directly into a JSONDoc to have a handle to that sub-doc.
template <> inline bool JSONDoc::get<JSONDoc>(const std::string path, StatusObjectReader &out, bool split) {
bool r = has(path, split);

View File

@ -26,8 +26,33 @@
#include "StatusClient.h"
#include "Status.h"
#include "json_spirit/json_spirit_writer_template.h"
#include "json_spirit/json_spirit_reader_template.h"
#include "fdbrpc/genericactors.actor.h"
json_spirit::mValue readJSONStrictly(const std::string &s) {
json_spirit::mValue val;
std::string::const_iterator i = s.begin();
if(!json_spirit::read_range(i, s.end(), val)) {
if(g_network->isSimulated()) {
printf("MALFORMED: %s\n", s.c_str());
}
throw json_malformed();
}
// Allow trailing whitespace
while(i != s.end()) {
if(!isspace(*i)) {
if(g_network->isSimulated()) {
printf("EXPECTED EOF: %s\n^^^\n%s\n", std::string(s.begin(), i).c_str(), std::string(i, s.end()).c_str());
}
throw json_eof_expected();
}
++i;
}
return val;
}
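A usage sketch for the new strict parser: exactly one JSON value parses, trailing whitespace is tolerated, and anything else throws one of the two error codes:

json_spirit::mValue ok = readJSONStrictly("{\"a\":1}  ");  // trailing whitespace is allowed
// readJSONStrictly("{\"a\":1} x");   // would throw json_eof_expected: data after the value
// readJSONStrictly("{\"a\":");       // would throw json_malformed: no complete value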
uint64_t JSONDoc::expires_reference_version = std::numeric_limits<uint64_t>::max();
// Template specializations for mergeOperator
@ -267,7 +292,7 @@ ACTOR Future<Optional<StatusObject>> clientCoordinatorsStatusFetcher(Reference<C
for (int i = 0; i < coord.clientLeaderServers.size(); i++)
leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader, GetLeaderRequest(coord.clusterKey, UID()), TaskCoordinationReply));
Void _ = wait( smartQuorum(leaderServers, leaderServers.size() / 2 + 1, 1.5) || delay(2.0) );
wait( smartQuorum(leaderServers, leaderServers.size() / 2 + 1, 1.5) || delay(2.0) );
statusObj["quorum_reachable"] = *quorum_reachable = quorum(leaderServers, leaderServers.size() / 2 + 1).isReady();
@ -338,7 +363,7 @@ ACTOR Future<Optional<StatusObject>> clusterStatusFetcher(ClusterInterface cI, S
state Future<Void> clusterTimeout = delay(30.0);
state Optional<StatusObject> oStatusObj;
Void _ = wait(delay(0.0)); //make sure the cluster controller is marked as not failed
wait(delay(0.0)); //make sure the cluster controller is marked as not failed
state Future<ErrorOr<StatusReply>> statusReply = cI.databaseStatus.tryGetReply(req);
loop{
@ -356,7 +381,7 @@ ACTOR Future<Optional<StatusObject>> clusterStatusFetcher(ClusterInterface cI, S
}
break;
}
when(Void _ = wait(clusterTimeout)){
when(wait(clusterTimeout)){
messages->push_back(makeMessage("status_incomplete_timeout", "Timed out fetching cluster status."));
break;
}
@ -484,8 +509,8 @@ ACTOR Future<StatusObject> statusFetcherImpl( Reference<ClusterConnectionFile> f
break;
}
choose{
when(Void _ = wait(clusterInterface->onChange())) {}
when(Void _ = wait(interfaceTimeout)) {
when(wait(clusterInterface->onChange())) {}
when(wait(interfaceTimeout)) {
clientMessages.push_back(makeMessage("no_cluster_controller", "Unable to locate a cluster controller within 2 seconds. Check that there are server processes running."));
break;
}

View File

@ -558,3 +558,4 @@ std::pair<MetricNameRef, KeyRef> decodeMetricConfKey( KeyRef const& prefix, KeyR
const KeyRef maxUIDKey = LiteralStringRef("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff");
const KeyRef databaseLockedKey = LiteralStringRef("\xff/dbLocked");
const KeyRef mustContainSystemMutationsKey = LiteralStringRef("\xff/mustContainSystemMutations");

View File

@ -250,5 +250,6 @@ extern const KeyRef metricConfPrefix;
extern const KeyRef maxUIDKey;
extern const KeyRef databaseLockedKey;
extern const KeyRef mustContainSystemMutationsKey;
#endif

View File

@ -41,7 +41,7 @@ struct UnblockFutureTaskFunc : TaskFuncBase {
bool is_set = wait(future->isSet(tr));
if (is_set) {
Void _ = wait(future->performAllActions(tr, taskBucket));
wait(future->performAllActions(tr, taskBucket));
}
return Void();
@ -277,7 +277,7 @@ public:
return verified;
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -294,10 +294,10 @@ public:
validTask = _validTask;
}
if (!validTask) {
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
}
else {
Void _ = wait(taskFunc->finish(tr, taskBucket, futureBucket, task));
wait(taskFunc->finish(tr, taskBucket, futureBucket, task));
}
return Void();
@ -322,10 +322,10 @@ public:
state FlowLock::Releaser releaser;
// Wait until we are half way to the timeout version of this task
Void _ = wait(delay(0.8 * (BUGGIFY ? (2 * g_random->random01()) : 1.0) * (double)(task->timeoutVersion - (uint64_t)versionNow) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
wait(delay(0.8 * (BUGGIFY ? (2 * g_random->random01()) : 1.0) * (double)(task->timeoutVersion - (uint64_t)versionNow) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
// Take the extendMutex lock until we either succeed or stop trying to extend due to failure
Void _ = wait(task->extendMutex.take());
wait(task->extendMutex.take());
releaser = FlowLock::Releaser(task->extendMutex, 1);
loop {
@ -335,12 +335,12 @@ public:
// Attempt to extend the task's timeout
state Version newTimeout = wait(taskBucket->extendTimeout(tr, task, false));
Void _ = wait(tr->commit());
wait(tr->commit());
task->timeoutVersion = newTimeout;
versionNow = tr->getCommittedVersion();
break;
} catch(Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
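The 0.8 factor above schedules the extension attempt at roughly 80% of the task's remaining lifetime, converting the version delta to seconds via CORE_VERSIONSPERSECOND (BUGGIFY widens the jitter in simulation). The conversion in isolation, with the common knob value of one million versions per second stated as an assumption:

#include <cstdint>

// With versionsPerSecond = 1e6, a task 30e6 versions from its timeout
// sleeps about 0.8 * 30e6 / 1e6 = 24 seconds before trying to extend.
double extendDelaySeconds(uint64_t timeoutVersion, uint64_t versionNow,
                          double versionsPerSecond) {
    return 0.8 * double(timeoutVersion - versionNow) / versionsPerSecond;
}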
@ -368,40 +368,40 @@ public:
if (!validTask) {
bool isFinished = wait(taskBucket->isFinished(tr, task));
if (!isFinished) {
Void _ = wait(taskBucket->finish(tr, task));
wait(taskBucket->finish(tr, task));
}
Void _ = wait(tr->commit());
wait(tr->commit());
return true;
}
break;
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
Void _ = wait(taskFunc->execute(cx, taskBucket, futureBucket, task) || extendTimeoutRepeatedly(cx, taskBucket, task));
wait(taskFunc->execute(cx, taskBucket, futureBucket, task) || extendTimeoutRepeatedly(cx, taskBucket, task));
if (BUGGIFY) Void _ = wait(delay(10.0));
Void _ = wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
if (BUGGIFY) wait(delay(10.0));
wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
return finishTaskRun(tr, taskBucket, futureBucket, task, taskFunc, verifyTask);
}));
}
} catch(Error &e) {
TraceEvent(SevWarn, "TB_ExecuteFailure")
.error(e)
.detail("TaskUID", task->key.printable())
.detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable())
.detail("Priority", task->getPriority())
.error(e);
.detail("Priority", task->getPriority());
try {
Void _ = wait(taskFunc->handleError(cx, task, e));
wait(taskFunc->handleError(cx, task, e));
} catch(Error &e) {
TraceEvent(SevWarn, "TB_ExecuteFailureLogErrorFailed")
.error(e) // output handleError() error instead of original task error
.detail("TaskUID", task->key.printable())
.detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable())
.detail("Priority", task->getPriority())
.error(e); // output handleError() error instead of original task error
.detail("Priority", task->getPriority());
}
}
@ -429,7 +429,7 @@ public:
getTasks.clear();
for(int i = 0, imax = std::min<unsigned int>(getBatchSize, availableSlots.size()); i < imax; ++i)
getTasks.push_back(taskBucket->getOne(cx));
Void _ = wait(waitForAllReady(getTasks));
wait(waitForAllReady(getTasks));
bool done = false;
for(int i = 0; i < getTasks.size(); ++i) {
@ -460,7 +460,7 @@ public:
Future<Void> w = ready(waitForAny(tasks));
if(!availableSlots.empty())
w = w || delay(*pollDelay * (0.9 + g_random->random01() / 5)); // Jittered by 20 %, so +/- 10%
Void _ = wait(w);
wait(w);
// Check all of the task slots, any that are finished should be replaced with Never() and their slots added back to availableSlots
for(int i = 0; i < tasks.size(); ++i) {
@ -480,11 +480,11 @@ public:
Optional<Value> pausedVal = wait(tr->get(taskBucket->pauseKey));
paused->set(pausedVal.present());
state Future<Void> watchPausedFuture = tr->watch(taskBucket->pauseKey);
Void _ = wait(tr->commit());
Void _ = wait(watchPausedFuture);
wait(tr->commit());
wait(watchPausedFuture);
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
}
@ -495,10 +495,10 @@ public:
loop {
while(paused->get()) {
Void _ = wait(paused->onChange() || watchPausedFuture);
wait(paused->onChange() || watchPausedFuture);
}
Void _ = wait(dispatch(cx, taskBucket, futureBucket, pollDelay, maxConcurrentTasks) || paused->onChange() || watchPausedFuture);
wait(dispatch(cx, taskBucket, futureBucket, pollDelay, maxConcurrentTasks) || paused->onChange() || watchPausedFuture);
}
}
@ -597,11 +597,11 @@ public:
Optional<Value> val = wait(tr->get(taskBucket->active.key()));
startingValue = val;
Void _ = wait(tr->commit());
wait(tr->commit());
break;
}
catch (Error &e) {
Void _ = wait(tr->onError(e));
wait(tr->onError(e));
}
}
@ -612,7 +612,7 @@ public:
try {
taskBucket->setOptions(tr);
Void _ = wait(delay(CLIENT_KNOBS->TASKBUCKET_CHECK_ACTIVE_DELAY));
wait(delay(CLIENT_KNOBS->TASKBUCKET_CHECK_ACTIVE_DELAY));
bool isActiveKey = wait(getActiveKey(tr, taskBucket, startingValue));
if (isActiveKey) {
TEST(true); // checkActive return true
@ -620,7 +620,7 @@ public:
}
break;
} catch( Error &e ) {
Void _ = wait( tr->onError(e) );
wait( tr->onError(e) );
}
}
}
@ -721,7 +721,7 @@ public:
taskBucket->setOptions(tr);
// First make sure it's safe to keep running
Void _ = wait(taskBucket->keepRunning(tr, task));
wait(taskBucket->keepRunning(tr, task));
// This is where the task definition currently exists
@ -980,7 +980,7 @@ public:
tr->clear(taskFuture->blocks.pack(StringRef()));
Void _ = wait(_join(tr, taskBucket, taskFuture, vectorFuture));
wait(_join(tr, taskBucket, taskFuture, vectorFuture));
return Void();
}
@ -997,7 +997,7 @@ public:
onSetFutures.push_back( vectorFuture[i]->onSet(tr, taskBucket, task) );
}
Void _ = wait( waitForAll(onSetFutures) );
wait( waitForAll(onSetFutures) );
return Void();
}
@ -1019,7 +1019,7 @@ public:
if (is_set) {
TEST(true); // is_set == true
Void _ = wait(performAction(tr, taskBucket, taskFuture, task));
wait(performAction(tr, taskBucket, taskFuture, task));
}
else {
TEST(true); // is_set == false
@ -1037,7 +1037,7 @@ public:
tr->clear(taskFuture->blocks.range());
Void _ = wait(performAllActions(tr, taskBucket, taskFuture));
wait(performAllActions(tr, taskBucket, taskFuture));
return Void();
}
@ -1048,7 +1048,7 @@ public:
if (task && TaskFuncBase::isValidTask(task)) {
Reference<TaskFuncBase> taskFunc = TaskFuncBase::create(task->params[Task::reservedTaskParamKeyType]);
if (taskFunc.getPtr()) {
Void _ = wait(taskFunc->finish(tr, taskBucket, taskFuture->futureBucket, task));
wait(taskFunc->finish(tr, taskBucket, taskFuture->futureBucket, task));
}
}
@ -1082,7 +1082,7 @@ public:
actions.push_back(performAction(tr, taskBucket, taskFuture, task));
}
Void _ = wait(waitForAll(actions));
wait(waitForAll(actions));
return Void();
}
@ -1092,7 +1092,7 @@ public:
task->params[Task::reservedTaskParamKeyAddTask] = task->params[Task::reservedTaskParamKeyType];
task->params[Task::reservedTaskParamKeyType] = LiteralStringRef("AddTask");
Void _ = wait(onSet(tr, taskBucket, taskFuture, task));
wait(onSet(tr, taskBucket, taskFuture, task));
return Void();
}
@ -1112,7 +1112,7 @@ public:
task->params[Task::reservedTaskParamValidKey] = validationKey;
task->params[Task::reservedTaskParamValidValue] = validationValue.get();
Void _ = wait(onSetAddTask(tr, taskBucket, taskFuture, task));
wait(onSetAddTask(tr, taskBucket, taskFuture, task));
return Void();
}
@ -1132,7 +1132,7 @@ public:
std::vector<Reference<TaskFuture>> vectorFuture;
state Reference<TaskFuture> future = taskFuture->futureBucket->future(tr);
vectorFuture.push_back(future);
Void _ = wait(join(tr, taskBucket, taskFuture, vectorFuture));
wait(join(tr, taskBucket, taskFuture, vectorFuture));
return future;
}
};

View File

@ -65,17 +65,17 @@ Future<Reference<IDatabase>> threadSafeCreateDatabase( Database db ) {
return Reference<IDatabase>(new ThreadSafeDatabase(db.getPtr()));
}
ACTOR Future<Reference<IDatabase>> threadSafeCreateDatabase( Cluster* cluster, Standalone<StringRef> name ) {
Database db = wait( cluster->createDatabase(name) );
ACTOR Future<Reference<IDatabase>> threadSafeCreateDatabase( Cluster* cluster ) {
Database db = wait( cluster->createDatabase() );
Reference<IDatabase> threadSafeDb = wait(threadSafeCreateDatabase(db));
return threadSafeDb;
}
ThreadFuture<Reference<IDatabase>> ThreadSafeCluster::createDatabase( Standalone<StringRef> dbName ) {
ThreadFuture<Reference<IDatabase>> ThreadSafeCluster::createDatabase() {
Cluster* cluster = this->cluster;
return onMainThread( [cluster, dbName](){
return onMainThread( [cluster](){
cluster->checkDeferredError();
return threadSafeCreateDatabase(cluster, dbName);
return threadSafeCreateDatabase(cluster);
} );
}

View File

@ -33,7 +33,7 @@ class ThreadSafeCluster : public ICluster, public ThreadSafeReferenceCounted<Thr
public:
static ThreadFuture<Reference<ICluster>> create( std::string connFilename, int apiVersion = -1 );
~ThreadSafeCluster();
ThreadFuture<Reference<IDatabase>> createDatabase( Standalone<StringRef> dbName );
ThreadFuture<Reference<IDatabase>> createDatabase();
void setOption( FDBClusterOptions::Option option, Optional<StringRef> value = Optional<StringRef>() );

View File

@ -28,6 +28,7 @@
#define FDBCLIENT_VERSIONEDMAP_ACTOR_H
#include "flow/flow.h"
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR template <class Tree>
Future<Void> deferredCleanupActor( std::vector<Tree> toFree, int taskID = 7000 ) {
@ -42,10 +43,11 @@ Future<Void> deferredCleanupActor( std::vector<Tree> toFree, int taskID = 7000 )
}
if(++freeCount % 100 == 0)
Void _ = wait( yield(taskID) );
wait( yield(taskID) );
}
return Void();
}
#endif
#include "flow/unactorcompiler.h"
#endif

View File

@ -65,6 +65,7 @@
<ClInclude Include="ReadYourWrites.h" />
<ActorCompiler Include="RunTransaction.actor.h" />
<ClInclude Include="RYWIterator.h" />
<ClInclude Include="Schemas.h" />
<ClInclude Include="SnapshotCache.h" />
<ClInclude Include="Status.h" />
<ClInclude Include="StatusClient.h" />
@ -80,6 +81,7 @@
<ClInclude Include="WriteMap.h" />
<ClInclude Include="Subspace.h" />
<ClInclude Include="Tuple.h" />
<ClInclude Include="JsonBuilder.h" />
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="FailureMonitorClient.actor.cpp" />
@ -97,11 +99,13 @@
<ActorCompiler Include="MultiVersionTransaction.actor.cpp" />
<ClCompile Include="RYWIterator.cpp" />
<ActorCompiler Include="StatusClient.actor.cpp" />
<ClCompile Include="Schemas.cpp" />
<ClCompile Include="SystemData.cpp" />
<ActorCompiler Include="ThreadSafeTransaction.actor.cpp" />
<ActorCompiler Include="TaskBucket.actor.cpp" />
<ClCompile Include="Subspace.cpp" />
<ClCompile Include="Tuple.cpp" />
<ClCompile Include="JsonBuilder.cpp" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGUID>{E2939DAA-238E-4970-96C4-4C57980F93BD}</ProjectGUID>

View File

@ -53,7 +53,7 @@ description is not currently required but encouraged.
description="Set internal tuning or debugging knobs"/>
<Option name="TLS_plugin" code="41"
paramType="String" paramDescription="file path or linker-resolved name"
description="Set the TLS plugin to load. This option, if used, must be set before any other TLS options" />
description="Deprecated" />
<Option name="TLS_cert_bytes" code="42"
paramType="Bytes" paramDescription="certificates"
description="Set the certificate chain" />

View File

@ -160,7 +160,7 @@ ACTOR Future<int> actorFuzz6( FutureStream<int> inputStream, PromiseStream<int>
outputStream.send( 228205 );
state int i213703; for(i213703 = 0; i213703 < 5; i213703++) {
outputStream.send( 686961 );
Void _ = wait( error ); // throw operation_failed()
wait( error ); // throw operation_failed()
outputStream.send( 453057 );
}
outputStream.send( 318329 );
@ -360,7 +360,7 @@ ACTOR Future<int> actorFuzz19( FutureStream<int> inputStream, PromiseStream<int>
outputStream.send( 507597 );
if ( (++ifstate&1) == 0 ) {
outputStream.send( 165706 );
Void _ = wait( error ); // throw operation_failed()
wait( error ); // throw operation_failed()
outputStream.send( 517934 );
} else {
outputStream.send( 310975 );

View File

@ -18,9 +18,9 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include "flow/flow.h"
#include <vector>
#include "flow/actorcompiler.h"
using std::vector;
@ -40,3 +40,5 @@ bool testFuzzActor( Future<int>(*actor)(FutureStream<int> const&, PromiseStream<
// This is defined by ActorFuzz.actor.cpp (generated by actorFuzz.py)
// Returns (tests passed, tests total)
std::pair<int,int> actorFuzzTests();
#include "flow/unactorcompiler.h"

View File

@ -39,7 +39,7 @@ ACTOR Future<Void> sendStuff(int id, Reference<IRateControl> t, int bytes) {
state int total = 0;
while(total < bytes) {
state int r = std::min<int>(g_random->randomInt(0,1000), bytes - total);
Void _ = wait(t->getAllowance(r));
wait(t->getAllowance(r));
total += r;
}
double dur = timer() - ts;
@ -69,7 +69,7 @@ TEST_CASE("backup/throttling") {
s = 5000;
f.push_back(sendStuff(id++, t, s)); total += s;
Void _ = wait(waitForAll(f));
wait(waitForAll(f));
double dur = timer() - ts;
int speed = int(total / dur);
printf("Speed limit was %d, measured speed was %d\n", limit, speed);

View File

@ -37,10 +37,11 @@
#include "BlobStore.h"
#include "md5/md5.h"
#include "libb64/encode.h"
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR template<typename T> static Future<T> joinErrorGroup(Future<T> f, Promise<Void> p) {
try {
Void _ = wait(success(f) || p.getFuture());
wait(success(f) || p.getFuture());
return f.get();
} catch(Error &e) {
if(p.canBeSet())
@ -106,7 +107,7 @@ public:
data = (const uint8_t *)data + finishlen;
// End current part (and start new one)
Void _ = wait(f->endCurrentPart(f.getPtr(), true));
wait(f->endCurrentPart(f.getPtr(), true));
p = f->m_parts.back().getPtr();
}
@ -140,12 +141,12 @@ public:
if(f->m_parts.size() == 1) {
Reference<Part> part = f->m_parts.back();
part->finalizeMD5();
Void _ = wait(f->m_bstore->writeEntireFileFromBuffer(f->m_bucket, f->m_object, &part->content, part->length, part->md5string));
wait(f->m_bstore->writeEntireFileFromBuffer(f->m_bucket, f->m_object, &part->content, part->length, part->md5string));
return Void();
}
// There are at least 2 parts. End the last part (which could be empty)
Void _ = wait(f->endCurrentPart(f));
wait(f->endCurrentPart(f));
state BlobStoreEndpoint::MultiPartSetT partSet;
state std::vector<Reference<Part>>::iterator p;
@ -158,7 +159,7 @@ public:
}
// No need to wait for the upload ID here because the above loop waited for all the parts, and each part required the upload ID, so it is ready
Void _ = wait(f->m_bstore->finishMultiPartUpload(f->m_bucket, f->m_object, f->m_upload_id.get(), partSet));
wait(f->m_bstore->finishMultiPartUpload(f->m_bucket, f->m_object, f->m_upload_id.get(), partSet));
return Void();
}
@ -220,7 +221,7 @@ private:
return Void();
// Wait for an upload slot to be available
Void _ = wait(f->m_concurrentUploads.take());
wait(f->m_concurrentUploads.take());
// Do the upload, and if it fails forward errors to m_error and also stop if anything else sends an error to m_error
// Also, hold a releaser for the concurrent upload slot while all that is going on.
@ -291,4 +292,5 @@ public:
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -164,7 +164,7 @@ void AsyncFileCached::releaseZeroCopy( void* data, int length, int64_t offset )
}
}
Future<Void> AsyncFileCached::truncate_impl( int64_t size ) {
Future<Void> AsyncFileCached::changeFileSize( int64_t size ) {
++countFileCacheWrites;
++countCacheWrites;
@ -214,7 +214,11 @@ Future<Void> AsyncFileCached::truncate_impl( int64_t size ) {
++p;
}
return truncate_underlying( this, size, waitForAll( actors ) );
// Wait for the page truncations to finish, then truncate the underlying file
// Template types are provided explicitly because they cannot be deduced automatically here.
return mapAsync<Void, std::function<Future<Void>(Void)>, Void>(waitForAll(actors), [=](Void _) -> Future<Void> {
return uncached->truncate(size);
});
}
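mapAsync chains a continuation onto a future without writing a separate ACTOR. Its presumed signature, inferred from this call site rather than quoted from flow:

// Presumed declaration shape (not verbatim):
//   template <class T, class F, class U>
//   Future<U> mapAsync(Future<T> what, F actorFunc);
//
// When waitForAll(actors) completes, run the lambda and adopt its future:
Future<Void> done = mapAsync<Void, std::function<Future<Void>(Void)>, Void>(
    waitForAll(actors), [=](Void) -> Future<Void> { return uncached->truncate(size); });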
Future<Void> AsyncFileCached::flush() {

View File

@ -32,6 +32,7 @@
#include "flow/Knobs.h"
#include "flow/TDMetric.actor.h"
#include "flow/network.h"
#include "flow/actorcompiler.h" // This must be the last #include.
struct EvictablePage {
void* data;
@ -122,7 +123,7 @@ public:
// If there is a truncate in progress before the write position then we must
// wait for it to complete.
if(length + offset > self->currentTruncateSize)
Void _ = wait(self->currentTruncate);
wait(self->currentTruncate);
++self->countFileCacheWrites;
++self->countCacheWrites;
Future<Void> f = read_write_impl(self, const_cast<void*>(data), length, offset, true);
@ -130,8 +131,8 @@ public:
++self->countFileCacheWritesBlocked;
++self->countCacheWritesBlocked;
}
Void r = wait(f);
return r;
wait(f);
return Void();
}
virtual Future<Void> write( void const* data, int length, int64_t offset ) {
@ -141,26 +142,21 @@ public:
virtual Future<Void> readZeroCopy( void** data, int* length, int64_t offset );
virtual void releaseZeroCopy( void* data, int length, int64_t offset );
Future<Void> truncate_impl( int64_t size );
// Enforces ordering of truncates and maintains currentTruncate and currentTruncateSize
// so writers can wait behind truncates that would affect them.
ACTOR static Future<Void> wait_then_truncate(AsyncFileCached *self, int64_t size) {
Void _ = wait(self->currentTruncate);
self->currentTruncateSize = size;
self->currentTruncate = self->truncate_impl(size);
Void _ = wait(self->currentTruncate);
return Void();
}
// This waits for previously started truncates to finish and then truncates
virtual Future<Void> truncate( int64_t size ) {
return wait_then_truncate(this, size);
return truncate_impl(this, size);
}
// This is the 'real' truncate that does the actual removal of cache blocks and then shortens the file
Future<Void> changeFileSize( int64_t size );
ACTOR Future<Void> truncate_underlying( AsyncFileCached* self, int64_t size, Future<Void> truncates ) {
Void _ = wait( truncates );
Void _ = wait( self->uncached->truncate( size ) );
// This wrapper for the actual truncation operation enforces ordering of truncates.
// It maintains currentTruncate and currentTruncateSize so writers can wait behind truncates that would affect them.
ACTOR static Future<Void> truncate_impl(AsyncFileCached *self, int64_t size) {
wait(self->currentTruncate);
self->currentTruncateSize = size;
self->currentTruncate = self->changeFileSize(size);
wait(self->currentTruncate);
return Void();
}
@ -284,8 +280,8 @@ private:
Future<Void> quiesce();
ACTOR static Future<Void> waitAndSync( AsyncFileCached* self, Future<Void> flush ) {
Void _ = wait( flush );
Void _ = wait( self->uncached->sync() );
wait( flush );
wait( self->uncached->sync() );
return Void();
}
@ -341,7 +337,7 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
}
ACTOR static Future<Void> waitAndWrite( AFCPage* self, void const* data, int length, int offset ) {
Void _ = wait( self->notReading );
wait( self->notReading );
memcpy( static_cast<uint8_t*>(self->data) + offset, data, length );
return Void();
}
@ -385,7 +381,7 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
}
ACTOR static Future<Void> waitAndRead( AFCPage* self, void* data, int length, int offset ) {
Void _ = wait( self->notReading );
wait( self->notReading );
memcpy( data, static_cast<uint8_t const*>(self->data) + offset, length );
return Void();
}
@ -415,7 +411,7 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
++self->writeThroughCount;
self->updateFlushableIndex();
Void _ = wait( self->notReading && self->notFlushing );
wait( self->notReading && self->notFlushing );
if (dirty) {
if ( self->pageOffset + self->pageCache->pageSize > self->owner->length ) {
@ -425,7 +421,7 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
auto f = self->owner->uncached->write( self->data, self->pageCache->pageSize, self->pageOffset );
Void _ = wait( f );
wait( f );
}
}
catch(Error& e) {
@ -482,7 +478,7 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
}
ACTOR static Future<Void> truncate_impl( AFCPage* self ) {
Void _ = wait( self->notReading && self->notFlushing && yield() );
wait( self->notReading && self->notFlushing && yield() );
delete self;
return Void();
}
@ -536,4 +532,5 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
int zeroCopyRefCount; // references held by "zero-copy" reads
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -31,14 +31,15 @@
#elif !defined(FLOW_ASYNCFILEEIO_ACTOR_H)
#define FLOW_ASYNCFILEEIO_ACTOR_H
#include <fcntl.h>
#include <sys/stat.h>
#include "eio.h"
#include "flow/flow.h"
#include "flow/ThreadHelper.actor.h"
#include "IAsyncFile.h"
#include "flow/TDMetric.actor.h"
#include <fcntl.h>
#include <sys/stat.h>
#include "flow/actorcompiler.h" // This must be the last #include.
class AsyncFileEIO : public IAsyncFile, public ReferenceCounted<AsyncFileEIO> {
@ -75,7 +76,7 @@ public:
state Promise<Void> p;
state eio_req* r = eio_open( open_filename.c_str(), openFlags(flags), mode, 0, eio_callback, &p );
try { Void _ = wait( p.getFuture() ); } catch (...) { eio_cancel(r); throw; }
try { wait( p.getFuture() ); } catch (...) { eio_cancel(r); throw; }
if (r->result < 0) {
errno = r->errorno;
bool notFound = errno == ENOENT;
@ -83,7 +84,7 @@ public:
TraceEvent(notFound ? SevWarn : SevWarnAlways, "FileOpenError").error(e).GetLastError().detail("File", filename).detail("Flags", flags).detail("Mode", mode);
throw e;
}
TraceEvent("AsyncFileOpened").detail("Filename", filename).detail("Fd", r->result).detail("Flags", flags).suppressFor(1.0);
TraceEvent("AsyncFileOpened").suppressFor(1.0).detail("Filename", filename).detail("Fd", r->result).detail("Flags", flags);
if ((flags & OPEN_LOCK) && !lock_fd(r->result)) {
TraceEvent(SevError, "UnableToLockFile").detail("Filename", filename).GetLastError();
@ -101,6 +102,11 @@ public:
return Void();
}
ACTOR static Future<std::time_t> lastWriteTime( std::string filename ) {
EIO_STRUCT_STAT statdata = wait(stat_impl(filename));
return statdata.st_mtime;
}
virtual void addref() { ReferenceCounted<AsyncFileEIO>::addref(); }
virtual void delref() { ReferenceCounted<AsyncFileEIO>::delref(); }
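A hypothetical caller for the new lastWriteTime() helper (the actor name, event name, and filename are made up for illustration):

ACTOR Future<Void> logLastWriteTime(std::string filename) {
    std::time_t t = wait(AsyncFileEIO::lastWriteTime(filename));
    TraceEvent("FileLastWriteTime").detail("Filename", filename).detail("Epoch", (int64_t)t);
    return Void();
}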
@ -139,15 +145,7 @@ public:
virtual Future<int64_t> size() {
++countFileLogicalReads;
++countLogicalReads;
struct stat buf;
if (fstat( fd, &buf )) {
TraceEvent("AsyncFileEIOFStatError").detail("Fd",fd).GetLastError();
return io_error();
}
return buf.st_size;
//return size_impl(fd);
return size_impl(fd);
}
virtual std::string getFilename() {
return filename;
@ -160,7 +158,7 @@ public:
if (folderFD<0)
throw io_error();
try {
Void _ = wait( async_fsync(folderFD) ); // not sure if fdatasync on the folder has the same effect
wait( async_fsync(folderFD) ); // not sure if fdatasync on the folder has the same effect
} catch (...) {
close(folderFD);
throw;
@ -179,7 +177,7 @@ public:
}
ACTOR static Future<Void> waitAndAtomicRename( Future<Void> fsync, std::string part_filename, std::string final_filename ) {
// First wait for the data in the part file to be durable
Void _ = wait(fsync);
wait(fsync);
// rename() is atomic
if (rename( part_filename.c_str(), final_filename.c_str() )) {
@ -188,7 +186,7 @@ public:
}
// fsync the parent directory to make it durable as well
Void _ = wait( async_fsync_parent(final_filename) );
wait( async_fsync_parent(final_filename) );
return Void();
}
@ -254,7 +252,7 @@ private:
static void error( const char* context, int fd, eio_req* r, Reference<ErrorInfo> const& err = Reference<ErrorInfo>() ) {
Error e = io_error();
errno = r->errorno;
TraceEvent(context).detail("Fd", fd).detail("Result", r->result).GetLastError().error(e);
TraceEvent(context).error(e).detail("Fd", fd).detail("Result", r->result).GetLastError();
if (err) err->set(e);
else throw e;
}
@ -262,9 +260,9 @@ private:
ACTOR static void close_impl( int fd ) {
state Promise<Void> p;
state eio_req* r = eio_close(fd, 0, eio_callback, &p);
Void _ = wait( p.getFuture() );
wait( p.getFuture() );
if (r->result) error( "CloseError", fd, r );
TraceEvent("AsyncFileClosed").detail("Fd", fd).suppressFor(1.0);
TraceEvent("AsyncFileClosed").suppressFor(1.0).detail("Fd", fd);
}
ACTOR static Future<int> read_impl( int fd, void* data, int length, int64_t offset ) {
@ -272,7 +270,7 @@ private:
state Promise<Void> p;
//fprintf(stderr, "eio_read (fd=%d length=%d offset=%lld)\n", fd, length, offset);
state eio_req* r = eio_read(fd, data, length, offset, 0, eio_callback, &p);
try { Void _ = wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try {
state int result = r->result;
//printf("eio read: %d/%d\n", r->result, length);
@ -280,12 +278,12 @@ private:
error("ReadError", fd, r);
throw internal_error();
} else {
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
return result;
}
} catch( Error &_e ) {
state Error e = _e;
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
throw e;
}
}
@ -294,9 +292,9 @@ private:
state int taskID = g_network->getCurrentTask();
state Promise<Void> p;
state eio_req* r = eio_write(fd, (void*)data.begin(), data.size(), offset, 0, eio_callback, &p);
try { Void _ = wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
if (r->result != data.size()) error("WriteError", fd, r, err);
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
return Void();
}
@ -304,9 +302,9 @@ private:
state int taskID = g_network->getCurrentTask();
state Promise<Void> p;
state eio_req* r = eio_ftruncate(fd, size, 0, eio_callback, &p);
try { Void _ = wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
if (r->result) error("TruncateError", fd, r, err);
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
return Void();
}
@ -336,17 +334,17 @@ private:
state Promise<Void> p;
state eio_req* r = start_fsync( fd, p, sync_metadata );
try { Void _ = wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try {
// Report any errors from prior write() or truncate() calls
err->report();
if (r->result) error("SyncError", fd, r);
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
return Void();
} catch( Error &_e ) {
state Error e = _e;
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
throw e;
}
}
@ -355,16 +353,28 @@ private:
state int taskID = g_network->getCurrentTask();
state Promise<Void> p;
state eio_req* r = eio_fstat( fd, 0, eio_callback, &p );
try { Void _ = wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
if (r->result) error("StatError", fd, r);
try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
if (r->result) error("FStatError", fd, r);
EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)r->ptr2;
if (!statdata) error("StatBufferError", fd, r);
if (!statdata) error("FStatBufferError", fd, r);
state int64_t size = statdata->st_size;
free(statdata);
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
return size;
}
ACTOR static Future<EIO_STRUCT_STAT> stat_impl( std::string filename ) {
state int taskID = g_network->getCurrentTask();
state Promise<Void> p;
state EIO_STRUCT_STAT statdata;
state eio_req* r = eio_stat( filename.c_str(), 0, eio_callback, &p );
try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
if (r->result) error("StatError", 0, r);
if (!r->ptr2) error("StatBufferError", 0, r);
statdata = *EIO_STAT_BUF(r);
wait( delay(0, taskID) );
return statdata;
}
ACTOR template <class R> static Future<R> dispatch_impl( std::function<R()> func) {
state Dispatch<R> data( func );
state int taskID = g_network->getCurrentTask();
@ -390,9 +400,9 @@ private:
p.send(Void());
return 0;
}, &data);
try { Void _ = wait( data.done.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
try { wait( data.done.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }
Void _ = wait( delay(0, taskID) );
wait( delay(0, taskID) );
if (data.result.isError()) throw data.result.getError();
return data.result.get();
}
@ -401,7 +411,7 @@ private:
ACTOR static void poll_eio() {
while (eio_poll() == -1)
Void _ = wait( yield() );
wait( yield() );
want_poll = 0;
}
@ -430,5 +440,6 @@ private:
volatile int32_t AsyncFileEIO::want_poll = 0;
#endif
#include "flow/unactorcompiler.h"
#endif
#endif

View File

@ -39,6 +39,7 @@
#include <stdio.h>
#include "flow/Hash3.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Set this to true to enable detailed KAIO request logging, which currently is written to a hardcoded location /data/v7/fdb/
#define KAIO_LOGGING 0
@ -112,8 +113,8 @@ public:
Error e = errno==ENOENT ? file_not_found() : io_error();
int ecode = errno; // Save errno in case it is modified before it is used below
TraceEvent ev("AsyncFileKAIOOpenFailed");
ev.detail("Filename", filename).detailf("Flags", "%x", flags)
.detailf("OSFlags", "%x", openFlags(flags) | O_DIRECT).detailf("Mode", "0%o", mode).error(e).GetLastError();
ev.error(e).detail("Filename", filename).detailf("Flags", "%x", flags)
.detailf("OSFlags", "%x", openFlags(flags) | O_DIRECT).detailf("Mode", "0%o", mode).GetLastError();
if(ecode == EINVAL)
ev.detail("Description", "Invalid argument - Does the target filesystem support KAIO?");
return e;
@ -290,7 +291,7 @@ public:
}
ACTOR static Future<Void> throwErrorIfFailed( Reference<AsyncFileKAIO> self, Future<Void> sync ) {
Void _ = wait( sync );
wait( sync );
if(self->failed) {
throw io_timeout();
}
@ -466,7 +467,7 @@ private:
int getTask() const { return (prio>>32)+1; }
ACTOR static void deliver( Promise<int> result, bool failed, int r, int task ) {
Void _ = wait( delay(0, task) );
wait( delay(0, task) );
if (failed) result.sendError(io_timeout());
else if (r < 0) result.sendError(io_error());
else result.send(r);
@ -640,7 +641,7 @@ private:
loop {
int64_t evfd_count = wait( ev->read() );
Void _ = wait(delay(0, TaskDiskIOComplete));
wait(delay(0, TaskDiskIOComplete));
linux_ioresult ev[FLOW_KNOBS->MAX_OUTSTANDING];
timespec tm; tm.tv_sec = 0; tm.tv_nsec = 0;
@ -768,7 +769,7 @@ ACTOR Future<Void> runTestOps(Reference<IAsyncFile> f, int numIterations, int fi
state int fIndex = 0;
for(; fIndex < futures.size(); ++fIndex) {
try {
Void _ = wait(futures[fIndex]);
wait(futures[fIndex]);
}
catch(Error &e) {
ASSERT(!expectedToSucceed);
@ -778,7 +779,7 @@ ACTOR Future<Void> runTestOps(Reference<IAsyncFile> f, int numIterations, int fi
}
try {
Void _ = wait(f->sync() && delay(0.1));
wait(f->sync() && delay(0.1));
ASSERT(expectedToSucceed);
}
catch(Error &e) {
@ -797,32 +798,32 @@ TEST_CASE("fdbrpc/AsyncFileKAIO/RequestList") {
try {
state Reference<IAsyncFile> f = wait(AsyncFileKAIO::open("/tmp/__KAIO_TEST_FILE__", IAsyncFile::OPEN_UNBUFFERED | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_CREATE, 0666, nullptr));
state int fileSize = 2<<27; // ~100MB
Void _ = wait(f->truncate(fileSize));
wait(f->truncate(fileSize));
// Test that the request list works as intended with default timeout
AsyncFileKAIO::setTimeout(0.0);
Void _ = wait(runTestOps(f, 100, fileSize, true));
wait(runTestOps(f, 100, fileSize, true));
ASSERT(!((AsyncFileKAIO*)f.getPtr())->failed);
// Test that the request list works as intended with long timeout
AsyncFileKAIO::setTimeout(20.0);
Void _ = wait(runTestOps(f, 100, fileSize, true));
wait(runTestOps(f, 100, fileSize, true));
ASSERT(!((AsyncFileKAIO*)f.getPtr())->failed);
// Test that requests timeout correctly
AsyncFileKAIO::setTimeout(0.0001);
Void _ = wait(runTestOps(f, 10, fileSize, false));
wait(runTestOps(f, 10, fileSize, false));
ASSERT(((AsyncFileKAIO*)f.getPtr())->failed);
}
catch(Error &e) {
state Error err = e;
if(f) {
Void _ = wait(AsyncFileEIO::deleteFile(f->getFilename(), true));
wait(AsyncFileEIO::deleteFile(f->getFilename(), true));
}
throw err;
}
Void _ = wait(AsyncFileEIO::deleteFile(f->getFilename(), true));
wait(AsyncFileEIO::deleteFile(f->getFilename(), true));
}
return Void();
@ -830,5 +831,6 @@ TEST_CASE("fdbrpc/AsyncFileKAIO/RequestList") {
AsyncFileKAIO::Context AsyncFileKAIO::ctx;
#include "flow/unactorcompiler.h"
#endif
#endif


@ -23,13 +23,13 @@
std::map<std::string, Future<Void>> AsyncFileNonDurable::filesBeingDeleted;
ACTOR Future<Void> sendOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, int taskID ) {
Void _ = wait( g_simulator.onProcess( process, taskID ) );
wait( g_simulator.onProcess( process, taskID ) );
promise.send(Void());
return Void();
}
ACTOR Future<Void> sendErrorOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, Error e, int taskID ) {
Void _ = wait( g_simulator.onProcess( process, taskID ) );
wait( g_simulator.onProcess( process, taskID ) );
promise.sendError(e);
return Void();
}
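
Both helpers above follow the same simulator discipline: hop onto the target process with g_simulator.onProcess() before touching the promise, so the promise's continuations fire in the context of that simulated process. A hedged sketch of the pattern for arbitrary work (doWork is a hypothetical callee, not from this codebase):

    ACTOR Future<Void> runOnProcess( ISimulator::ProcessInfo* process, int taskID ) {
        // Switch execution to the given simulated process at the given task priority...
        wait( g_simulator.onProcess( process, taskID ) );
        // ...then do whatever must be attributed to that process.
        doWork();   // hypothetical
        return Void();
    }
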

View File

@ -33,6 +33,7 @@
#include "simulator.h"
#include "TraceFileIO.h"
#include "RangeMap.h"
#include "flow/actorcompiler.h" // This must be the last #include.
#undef max
#undef min
@ -201,9 +202,9 @@ public:
state Future<Void> shutdown = success(currentProcess->shutdownSignal.getFuture());
//TraceEvent("AsyncFileNonDurableOpenBegin").detail("Filename", filename).detail("Addr", g_simulator.getCurrentProcess()->address);
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
try {
Void _ = wait(success(wrappedFile) || shutdown);
wait(success(wrappedFile) || shutdown);
if(shutdown.isReady())
throw io_error().asInjectedFault();
@ -214,7 +215,7 @@ public:
state std::map<std::string, Future<Void>>::iterator deletedFile = filesBeingDeleted.find(filename);
if(deletedFile != filesBeingDeleted.end()) {
//TraceEvent("AsyncFileNonDurableOpenWaitOnDelete1").detail("Filename", filename);
Void _ = wait( deletedFile->second || shutdown );
wait( deletedFile->second || shutdown );
//TraceEvent("AsyncFileNonDurableOpenWaitOnDelete2").detail("Filename", filename);
if(shutdown.isReady())
throw io_error().asInjectedFault();
@ -224,22 +225,22 @@ public:
//Causes the approximateSize member to be set
state Future<int64_t> sizeFuture = nonDurableFile->size();
Void _ = wait(success(sizeFuture) || shutdown);
wait(success(sizeFuture) || shutdown);
if(shutdown.isReady())
throw io_error().asInjectedFault();
//TraceEvent("AsyncFileNonDurableOpenComplete").detail("Filename", filename);
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
return nonDurableFile;
} catch( Error &e ) {
state Error err = e;
std::string currentFilename = ( wrappedFile.isReady() && !wrappedFile.isError() ) ? wrappedFile.get()->getFilename() : actualFilename;
currentProcess->machine->openFiles.erase( currentFilename );
//TraceEvent("AsyncFileNonDurableOpenError").detail("Filename", filename).detail("Address", currentProcess->address).error(e, true).detail("Addr", g_simulator.getCurrentProcess()->address);
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
//TraceEvent("AsyncFileNonDurableOpenError").error(e, true).detail("Filename", filename).detail("Address", currentProcess->address).detail("Addr", g_simulator.getCurrentProcess()->address);
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
throw err;
}
}
@ -360,7 +361,7 @@ private:
ACTOR Future<Void> checkKilled(AsyncFileNonDurable *self, std::string context) {
if(self->killed.isSet()) {
//TraceEvent("AsyncFileNonDurable_KilledInCheck", self->id).detail("In", context).detail("Filename", self->filename);
Void _ = wait(self->killComplete.getFuture());
wait(self->killComplete.getFuture());
TraceEvent("AsyncFileNonDurable_KilledFileOperation", self->id).detail("In", context).detail("Filename", self->filename);
TEST(true); // AsyncFileNonDurable operation killed
throw io_error().asInjectedFault();
@ -371,14 +372,14 @@ private:
//Passes along reads straight to the underlying file, waiting for any outstanding changes that could affect the results
ACTOR Future<int> onRead(AsyncFileNonDurable *self, void *data, int length, int64_t offset) {
Void _ = wait(self->checkKilled(self, "Read"));
wait(self->checkKilled(self, "Read"));
vector<Future<Void>> priorModifications = self->getModificationsAndInsert(offset, length);
Void _ = wait(waitForAll(priorModifications));
wait(waitForAll(priorModifications));
state Future<int> readFuture = self->file->read(data, length, offset);
Void _ = wait( success( readFuture ) || self->killed.getFuture() );
wait( success( readFuture ) || self->killed.getFuture() );
// throws if we were killed
Void _ = wait(self->checkKilled(self, "ReadEnd"));
wait(self->checkKilled(self, "ReadEnd"));
debugFileCheck("AsyncFileNonDurableRead", self->filename, data, offset, length);
@ -391,16 +392,16 @@ private:
ACTOR Future<int> read(AsyncFileNonDurable *self, void *data, int length, int64_t offset) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state int currentTaskID = g_network->getCurrentTask();
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
try {
state int rep = wait( self->onRead( self, data, length, offset ) );
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
return rep;
} catch( Error &e ) {
state Error err = e;
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
throw err;
}
}
@ -411,7 +412,7 @@ private:
ACTOR Future<Void> write(AsyncFileNonDurable *self, Promise<Void> writeStarted, Future<Future<Void>> ownFuture, void const* data, int length, int64_t offset) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state int currentTaskID = g_network->getCurrentTask();
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
state double delayDuration = g_random->random01() * self->maxWriteDelay;
state Standalone<StringRef> dataCopy(StringRef((uint8_t*)data, length));
@ -420,7 +421,7 @@ private:
try {
//TraceEvent("AsyncFileNonDurable_Write", self->id).detail("Delay", delayDuration).detail("Filename", self->filename).detail("WriteLength", length).detail("Offset", offset);
Void _ = wait(self->checkKilled(self, "Write"));
wait(self->checkKilled(self, "Write"));
Future<Void> writeEnded = wait(ownFuture);
std::vector<Future<Void>> priorModifications = self->getModificationsAndInsert(offset, length, true, writeEnded);
@ -430,7 +431,7 @@ private:
else
priorModifications.push_back(waitUntilDiskReady(self->diskParameters, length) || self->killed.getFuture());
Void _ = wait(waitForAll(priorModifications));
wait(waitForAll(priorModifications));
self->approximateSize = std::max(self->approximateSize, length + offset);
@ -446,7 +447,7 @@ private:
//Wait a random amount of time or until a sync/kill is issued
state bool saveDurable = true;
choose {
when(Void _ = wait(delay(delayDuration))) { }
when(wait(delay(delayDuration))) { }
when(bool durable = wait(startSyncFuture)) {
saveDurable = durable;
}
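
choose/when blocks pick up the same syntax change: when(Void _ = wait(f)) becomes when(wait(f)). The construct races its branches and runs exactly one body; a common use in this codebase is a timeout race, sketched here (TIMEOUT_SECONDS is an illustrative name, not a real knob):

    choose {
        when( wait( reply.getFuture() ) ) {}          // the reply won the race
        when( wait( delay( TIMEOUT_SECONDS ) ) ) {    // the timer won the race
            throw timed_out();
        }
    }
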
@ -525,7 +526,7 @@ private:
}
}
Void _ = wait(waitForAll(writeFutures));
wait(waitForAll(writeFutures));
//TraceEvent("AsyncFileNonDurable_WriteDone", self->id).detail("Delay", delayDuration).detail("Filename", self->filename).detail("WriteLength", length).detail("Offset", offset);
return Void();
}
@ -535,14 +536,14 @@ private:
ACTOR Future<Void> truncate(AsyncFileNonDurable *self, Promise<Void> truncateStarted, Future<Future<Void>> ownFuture, int64_t size) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state int currentTaskID = g_network->getCurrentTask();
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
state double delayDuration = g_random->random01() * self->maxWriteDelay;
state Future<bool> startSyncFuture = self->startSyncPromise.getFuture();
try {
//TraceEvent("AsyncFileNonDurable_Truncate", self->id).detail("Delay", delayDuration).detail("Filename", self->filename);
Void _ = wait(self->checkKilled(self, "Truncate"));
wait(self->checkKilled(self, "Truncate"));
Future<Void> truncateEnded = wait(ownFuture);
std::vector<Future<Void>> priorModifications = self->getModificationsAndInsert(size, -1, true, truncateEnded);
@ -552,7 +553,7 @@ private:
else
priorModifications.push_back(waitUntilDiskReady(self->diskParameters, 0) || self->killed.getFuture());
Void _ = wait(waitForAll(priorModifications));
wait(waitForAll(priorModifications));
self->approximateSize = size;
@ -566,19 +567,19 @@ private:
//Wait a random amount of time or until a sync/kill is issued
state bool saveDurable = true;
choose {
when(Void _ = wait(delay(delayDuration))) { }
when(wait(delay(delayDuration))) { }
when(bool durable = wait(startSyncFuture)) {
saveDurable = durable;
}
}
if(g_network->check_yield(TaskDefaultYield)) {
Void _ = wait(delay(0, TaskDefaultYield));
wait(delay(0, TaskDefaultYield));
}
//If performing a durable truncate, then pass it through to the file. Otherwise, pass it through with a 1/2 chance
if(saveDurable || self->killMode == NO_CORRUPTION || g_random->random01() < 0.5)
Void _ = wait(self->file->truncate(size));
wait(self->file->truncate(size));
else {
TraceEvent("AsyncFileNonDurable_DroppedTruncate", self->id).detail("Size", size);
TEST(true); //AsyncFileNonDurable dropped truncate
@ -596,10 +597,10 @@ private:
if(durable) {
self->hasBeenSynced = true;
Void _ = wait(waitUntilDiskReady(self->diskParameters, 0, true) || self->killed.getFuture());
wait(waitUntilDiskReady(self->diskParameters, 0, true) || self->killed.getFuture());
}
Void _ = wait(self->checkKilled(self, durable ? "Sync" : "Kill"));
wait(self->checkKilled(self, durable ? "Sync" : "Kill"));
if(!durable)
self->killed.send( Void() );
@ -634,7 +635,7 @@ private:
//Wait for outstanding writes to complete
if(durable)
Void _ = wait(allModifications);
wait(allModifications);
else
ErrorOr<Void> _ = wait(errorOr(allModifications));
@ -652,8 +653,8 @@ private:
}
//A killed file cannot be allowed to report that it successfully synced
else {
Void _ = wait(self->checkKilled(self, "SyncEnd"));
Void _ = wait(self->file->sync());
wait(self->checkKilled(self, "SyncEnd"));
wait(self->file->sync());
//TraceEvent("AsyncFileNonDurable_ImplSyncEnd", self->id).detail("Filename", self->filename).detail("Durable", durable);
}
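
The durable/non-durable split above hinges on errorOr: a durable sync must propagate failures, while a kill only needs the outstanding modifications to be finished, successful or not. Spelled out with comments:

    if (durable) {
        // Failures must surface; a durable sync that failed is an error.
        wait( allModifications );
    } else {
        // Swallow failures; we only care that the futures are done.
        ErrorOr<Void> _ = wait( errorOr( allModifications ) );
    }
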
@ -663,16 +664,16 @@ private:
ACTOR Future<Void> sync(AsyncFileNonDurable *self, bool durable) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state int currentTaskID = g_network->getCurrentTask();
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
try {
Void _ = wait( self->onSync( self, durable ) );
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( self->onSync( self, durable ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
return Void();
} catch( Error &e ) {
state Error err = e;
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
throw err;
}
}
@ -680,11 +681,11 @@ private:
//Passes along size requests to the underlying file, augmenting with any writes past the end of the file
ACTOR Future<int64_t> onSize(AsyncFileNonDurable *self) {
//TraceEvent("AsyncFileNonDurable_Size", self->id).detail("Filename", self->filename);
Void _ = wait(self->checkKilled(self, "Size"));
wait(self->checkKilled(self, "Size"));
state Future<int64_t> sizeFuture = self->file->size();
Void _ = wait( success( sizeFuture ) || self->killed.getFuture() );
wait( success( sizeFuture ) || self->killed.getFuture() );
Void _ = wait(self->checkKilled(self, "SizeEnd"));
wait(self->checkKilled(self, "SizeEnd"));
//Include any modifications which extend past the end of the file
uint64_t maxModification = self->pendingModifications.lastItem().begin();
@ -696,16 +697,16 @@ private:
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state int currentTaskID = g_network->getCurrentTask();
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
try {
state int64_t rep = wait( self->onSize( self ) );
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
return rep;
} catch( Error &e ) {
state Error err = e;
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
throw err;
}
}
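
read, sync, size, and deleteFile all share the shape above: hop to the file's machine, do the work, and hop back to the caller's process before returning or rethrowing. The caught error is copied into a state variable because plain locals do not survive the wait() that performs the hop back. The skeleton (doWork is hypothetical, standing in for onRead/onSync/onSize):

    try {
        wait( doWork() );   // hypothetical
        wait( g_simulator.onProcess( currentProcess, currentTaskID ) );   // hop back on success
    } catch( Error &e ) {
        state Error err = e;   // must be a state var to survive the wait below
        wait( g_simulator.onProcess( currentProcess, currentTaskID ) );   // hop back on failure too
        throw err;
    }
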
@ -716,7 +717,7 @@ private:
state int currentTaskID = g_network->getCurrentTask();
state std::string filename = self->filename;
Void _ = wait( g_simulator.onMachine( currentProcess ) );
wait( g_simulator.onMachine( currentProcess ) );
try {
//Make sure all writes have gone through.
Promise<bool> startSyncPromise = self->startSyncPromise;
@ -730,11 +731,11 @@ private:
outstandingModifications.push_back(itr->value());
//Ignore errors here so that all modifications can finish
Void _ = wait(waitForAllReady(outstandingModifications));
wait(waitForAllReady(outstandingModifications));
//Make sure we aren't in the process of killing the file
if(self->killed.isSet())
Void _ = wait(self->killComplete.getFuture());
wait(self->killComplete.getFuture());
//Remove this file from the filesBeingDeleted map so that new files can be created with this filename
g_simulator.getMachineByNetworkAddress( self->openedAddress )->closingFiles.erase(self->getFilename());
@ -743,14 +744,15 @@ private:
//TraceEvent("AsyncFileNonDurable_FinishDelete", self->id).detail("Filename", self->filename);
delete self;
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
return Void();
} catch( Error &e ) {
state Error err = e;
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
throw err;
}
}
};
#include "flow/unactorcompiler.h"
#endif


@ -29,6 +29,7 @@
#include "flow/flow.h"
#include "IAsyncFile.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Read-only file type that wraps another file instance, reads in large blocks, and reads ahead of the actual range requested
class AsyncFileReadAheadCache : public IAsyncFile, public ReferenceCounted<AsyncFileReadAheadCache> {
@ -45,7 +46,7 @@ public:
// Read from the underlying file to a CacheBlock
ACTOR static Future<Reference<CacheBlock>> readBlock(AsyncFileReadAheadCache *f, int length, int64_t offset) {
Void _ = wait(f->m_max_concurrent_reads.take());
wait(f->m_max_concurrent_reads.take());
state Reference<CacheBlock> block(new CacheBlock(length));
try {
@ -192,4 +193,5 @@ public:
};
#endif
#include "flow/unactorcompiler.h"
#endif


@ -76,6 +76,14 @@ public:
// SOMEDAY: What is necessary to implement mustBeDurable on Windows? Does DeleteFile take care of it? DeleteFileTransacted?
return Void();
}
static Future<std::time_t> lastWriteTime( std::string filename ) {
// TODO(alexmiller): I have no idea about windows
struct _stat buf;
if (_stat( filename.c_str(), &buf ) != 0) {
throw io_error();
}
return buf.st_mtime;
}
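
The hunk above adds a Windows lastWriteTime based on _stat. The POSIX-side implementation is not shown in this commit; a plausible counterpart, offered purely as an assumption, would use stat() the same way:

    #include <sys/stat.h>

    // Assumed POSIX analogue of the Windows code above; not taken from this commit.
    static Future<std::time_t> lastWriteTime( std::string filename ) {
        struct stat buf;
        if (stat( filename.c_str(), &buf ) != 0) {
            throw io_error();
        }
        return buf.st_mtime;   // seconds since the epoch, matching the Windows path
    }
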
virtual void addref() { ReferenceCounted<AsyncFileWinASIO>::addref(); }
virtual void delref() { ReferenceCounted<AsyncFileWinASIO>::delref(); }
@ -85,7 +93,7 @@ public:
static void onReadReady( Promise<int> onReady, const boost::system::error_code& error, size_t bytesRead ) {
if (error) {
Error e = io_error();
TraceEvent("AsyncReadError").GetLastError().error(e)
TraceEvent("AsyncReadError").error(e).GetLastError()
.detail("ASIOCode", error.value())
.detail("ASIOMessage", error.message());
onReady.sendError(e);
@ -96,7 +104,7 @@ public:
static void onWriteReady( Promise<Void> onReady, size_t bytesExpected, const boost::system::error_code& error, size_t bytesWritten ) {
if (error) {
Error e = io_error();
TraceEvent("AsyncWriteError").GetLastError().error(e)
TraceEvent("AsyncWriteError").error(e).GetLastError()
.detail("ASIOCode", error.value())
.detail("ASIOMessage", error.message());
onReady.sendError(e);
@ -174,4 +182,4 @@ private:
#endif
#endif
#endif


@ -134,12 +134,12 @@ private:
if(history.checksum != 0 && history.checksum != checksum) {
// For reads, verify the stored sum if it is not 0. If it fails, clear it.
TraceEvent(SevError, "AsyncFileLostWriteDetected")
.error(checksum_failed())
.detail("Filename", m_f->getFilename())
.detail("PageNumber", page)
.detail("ChecksumOfPage", checksum)
.detail("ChecksumHistory", history.checksum)
.detail("LastWriteTime", history.timestamp)
.error(checksum_failed());
.detail("LastWriteTime", history.timestamp);
history.checksum = 0;
}
}


@ -182,7 +182,7 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
} catch(std::string &err) {
if(error != nullptr)
*error = err;
TraceEvent(SevWarnAlways, "BlobStoreEndpointBadURL").detail("Description", err).detail("Format", getURLFormat()).detail("URL", url).suppressFor(60, true);
TraceEvent(SevWarnAlways, "BlobStoreEndpointBadURL").suppressFor(60).detail("Description", err).detail("Format", getURLFormat()).detail("URL", url);
throw backup_invalid_url();
}
}
@ -207,7 +207,7 @@ std::string BlobStoreEndpoint::getResourceURL(std::string resource) {
}
ACTOR Future<bool> objectExists_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
Void _ = wait(b->requestRateRead->getAllowance(1));
wait(b->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -221,7 +221,7 @@ Future<bool> BlobStoreEndpoint::objectExists(std::string const &bucket, std::str
}
ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
Void _ = wait(b->requestRateDelete->getAllowance(1));
wait(b->requestRateDelete->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -249,7 +249,7 @@ ACTOR Future<Void> deleteRecursively_impl(Reference<BlobStoreEndpoint> b, std::s
loop {
choose {
// Throw if done throws, otherwise don't stop until end_of_stream
when(Void _ = wait(done)) {
when(wait(done)) {
done = Never();
}
@ -267,7 +267,7 @@ ACTOR Future<Void> deleteRecursively_impl(Reference<BlobStoreEndpoint> b, std::s
// This is just a precaution to avoid having too many outstanding delete actors waiting to run
while(deleteFutures.size() > CLIENT_KNOBS->BLOBSTORE_CONCURRENT_REQUESTS) {
Void _ = wait(deleteFutures.front());
wait(deleteFutures.front());
deleteFutures.pop_front();
}
}
@ -277,7 +277,7 @@ ACTOR Future<Void> deleteRecursively_impl(Reference<BlobStoreEndpoint> b, std::s
}
while(deleteFutures.size() > 0) {
Void _ = wait(deleteFutures.front());
wait(deleteFutures.front());
deleteFutures.pop_front();
}
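
deleteRecursively_impl bounds its concurrency with a plain queue of futures: once the queue grows past the knob, wait on the oldest entry before launching more, then drain the queue before returning. The shape, isolated (startTask and hasMoreTasks are hypothetical; LIMIT stands in for BLOBSTORE_CONCURRENT_REQUESTS, and std::deque stands in for the queue type used above):

    state std::deque<Future<Void>> work;
    while (hasMoreTasks()) {            // hypothetical producer
        work.push_back( startTask() );  // hypothetical launcher
        while (work.size() > LIMIT) {   // cap the number of in-flight actors
            wait( work.front() );
            work.pop_front();
        }
    }
    while (!work.empty()) {             // drain the stragglers
        wait( work.front() );
        work.pop_front();
    }
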
@ -289,7 +289,7 @@ Future<Void> BlobStoreEndpoint::deleteRecursively(std::string const &bucket, std
}
ACTOR Future<Void> createBucket_impl(Reference<BlobStoreEndpoint> b, std::string bucket) {
Void _ = wait(b->requestRateWrite->getAllowance(1));
wait(b->requestRateWrite->getAllowance(1));
std::string resource = std::string("/") + bucket;
HTTP::Headers headers;
@ -302,7 +302,7 @@ Future<Void> BlobStoreEndpoint::createBucket(std::string const &bucket) {
}
ACTOR Future<int64_t> objectSize_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
Void _ = wait(b->requestRateRead->getAllowance(1));
wait(b->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -341,11 +341,11 @@ ACTOR Future<Optional<json_spirit::mObject>> tryReadJSONFile(std::string path) {
if(json.type() == json_spirit::obj_type)
return json.get_obj();
else
TraceEvent(SevWarn, "BlobCredentialFileNotJSONObject").detail("File", path).suppressFor(60, true);
TraceEvent(SevWarn, "BlobCredentialFileNotJSONObject").suppressFor(60).detail("File", path);
} catch(Error &e) {
if(e.code() != error_code_actor_cancelled)
TraceEvent(SevWarn, errorEventType).detail("File", path).error(e).suppressFor(60, true);
TraceEvent(SevWarn, errorEventType).error(e).suppressFor(60).detail("File", path);
}
return Optional<json_spirit::mObject>();
@ -360,7 +360,7 @@ ACTOR Future<Void> updateSecret_impl(Reference<BlobStoreEndpoint> b) {
for(auto &f : *pFiles)
reads.push_back(tryReadJSONFile(f));
Void _ = wait(waitForAll(reads));
wait(waitForAll(reads));
std::string key = b->key + "@" + b->host;
@ -408,10 +408,9 @@ ACTOR Future<BlobStoreEndpoint::ReusableConnection> connect_impl(Reference<BlobS
// If the connection expires in the future then return it
if(rconn.expirationTime > now()) {
TraceEvent("BlobStoreEndpointReusingConnected")
TraceEvent("BlobStoreEndpointReusingConnected").suppressFor(60)
.detail("RemoteEndpoint", rconn.conn->getPeerAddress())
.detail("ExpiresIn", rconn.expirationTime - now())
.suppressFor(60, true);
.detail("ExpiresIn", rconn.expirationTime - now());
return rconn;
}
}
@ -420,13 +419,12 @@ ACTOR Future<BlobStoreEndpoint::ReusableConnection> connect_impl(Reference<BlobS
service = b->knobs.secure_connection ? "https" : "http";
state Reference<IConnection> conn = wait(INetworkConnections::net()->connect(b->host, service, b->knobs.secure_connection ? true : false));
TraceEvent("BlobStoreEndpointNewConnection")
TraceEvent("BlobStoreEndpointNewConnection").suppressFor(60)
.detail("RemoteEndpoint", conn->getPeerAddress())
.detail("ExpiresIn", b->knobs.max_connection_life)
.suppressFor(60, true);
.detail("ExpiresIn", b->knobs.max_connection_life);
if(b->lookupSecret)
Void _ = wait(b->updateSecret());
wait(b->updateSecret());
return BlobStoreEndpoint::ReusableConnection({conn, now() + b->knobs.max_connection_life});
}
@ -450,7 +448,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
headers["Content-Length"] = format("%d", contentLen);
headers["Host"] = bstore->host;
Void _ = wait(bstore->concurrentRequests.take());
wait(bstore->concurrentRequests.take());
state FlowLock::Releaser globalReleaser(bstore->concurrentRequests, 1);
state int maxTries = std::min(bstore->knobs.request_tries, bstore->knobs.connect_tries);
@ -489,7 +487,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
// when a new connection is established and setAuthHeaders() would need the updated secret.
bstore->setAuthHeaders(verb, resource, headers);
remoteAddress = rconn.conn->getPeerAddress();
Void _ = wait(bstore->requestRate->getAllowance(1));
wait(bstore->requestRate->getAllowance(1));
state Reference<HTTP::Response> r = wait(timeoutError(HTTP::doRequest(rconn.conn, verb, resource, headers, &contentCopy, contentLen, bstore->sendRate, &bstore->s_stats.bytes_sent, bstore->recvRate), bstore->knobs.request_timeout));
r->convertToJSONifXML();
@ -521,6 +519,16 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
retryable = retryable && (thisTry < maxTries);
TraceEvent event(SevWarn, retryable ? "BlobStoreEndpointRequestFailedRetryable" : "BlobStoreEndpointRequestFailed");
// Attach err to the trace event if present, otherwise extract details from the response
if(err.present()) {
event.error(err.get());
}
event.suppressFor(60);
if(!err.present()) {
event.detail("ResponseCode", r->code);
}
event.detail("ConnectionEstablished", connectionEstablished);
if(remoteAddress.present())
@ -530,8 +538,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
event.detail("Verb", verb)
.detail("Resource", resource)
.detail("ThisTry", thisTry)
.suppressFor(60, true);
.detail("ThisTry", thisTry);
// If r is not valid or not code 429 then increment the try count. 429's will not count against the attempt limit.
if(!r || r->code != 429)
@ -542,13 +549,6 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
// Double but limit the *next* nextRetryDelay.
nextRetryDelay = std::min(nextRetryDelay * 2, 60.0);
// Attach err to the trace event if present, otherwise extract details from the response
if(err.present())
event.error(err.get());
else {
event.detail("ResponseCode", r->code);
}
if(retryable) {
// If r is valid then obey the Retry-After response header if present.
if(r) {
@ -566,7 +566,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
// Log the delay then wait.
event.detail("RetryDelay", delay);
Void _ = wait(::delay(delay));
wait(::delay(delay));
}
else {
// We can't retry, so throw something.
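
The retry policy above combines three ingredients: 429 responses do not consume an attempt, nextRetryDelay doubles with a 60-second ceiling, and a server-supplied Retry-After header overrides the computed delay. The backoff arithmetic in isolation (the initial delay value is an assumption; it is set outside these hunks):

    double nextRetryDelay = 2.0;   // assumed starting point
    // Per retryable failure:
    double delaySeconds = nextRetryDelay;
    nextRetryDelay = std::min( nextRetryDelay * 2, 60.0 );   // double, but cap the *next* delay
    // Successive waits: 2, 4, 8, 16, 32, 60, 60, ... seconds
    wait( ::delay( delaySeconds ) );
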
@ -618,7 +618,7 @@ ACTOR Future<Void> listBucketStream_impl(Reference<BlobStoreEndpoint> bstore, st
state std::vector<Future<Void>> subLists;
while(more) {
Void _ = wait(bstore->concurrentLists.take());
wait(bstore->concurrentLists.take());
state FlowLock::Releaser listReleaser(bstore->concurrentLists, 1);
HTTP::Headers headers;
@ -702,18 +702,18 @@ ACTOR Future<Void> listBucketStream_impl(Reference<BlobStoreEndpoint> bstore, st
lastFile = result.commonPrefixes.back();
if(lastFile.empty()) {
TraceEvent(SevWarn, "BlobStoreEndpointListNoNextMarker").detail("Resource", fullResource).suppressFor(60, true);
TraceEvent(SevWarn, "BlobStoreEndpointListNoNextMarker").suppressFor(60).detail("Resource", fullResource);
throw backup_error();
}
}
} catch(Error &e) {
if(e.code() != error_code_actor_cancelled)
TraceEvent(SevWarn, "BlobStoreEndpointListResultParseError").detail("Resource", fullResource).error(e).suppressFor(60, true);
TraceEvent(SevWarn, "BlobStoreEndpointListResultParseError").error(e).suppressFor(60).detail("Resource", fullResource);
throw http_bad_response();
}
}
Void _ = wait(waitForAll(subLists));
wait(waitForAll(subLists));
return Void();
}
@ -736,7 +736,7 @@ ACTOR Future<BlobStoreEndpoint::ListResult> listBucket_impl(Reference<BlobStoreE
loop {
choose {
// Throw if done throws, otherwise don't stop until end_of_stream
when(Void _ = wait(done)) {
when(wait(done)) {
done = Never();
}
@ -826,7 +826,7 @@ void BlobStoreEndpoint::setAuthHeaders(std::string const &verb, std::string cons
}
ACTOR Future<std::string> readEntireFile_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object) {
Void _ = wait(bstore->requestRateRead->getAllowance(1));
wait(bstore->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -844,8 +844,8 @@ ACTOR Future<Void> writeEntireFileFromBuffer_impl(Reference<BlobStoreEndpoint> b
if(contentLen > bstore->knobs.multipart_max_part_size)
throw file_too_large();
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
Void _ = wait(bstore->concurrentUploads.take());
wait(bstore->requestRateWrite->getAllowance(1));
wait(bstore->concurrentUploads.take());
state FlowLock::Releaser uploadReleaser(bstore->concurrentUploads, 1);
std::string resource = std::string("/") + bucket + "/" + object;
@ -870,7 +870,7 @@ ACTOR Future<Void> writeEntireFile_impl(Reference<BlobStoreEndpoint> bstore, std
// Yield because we may have just had to copy several MB into the packet buffer chain, and next we have to calculate an MD5 sum of it.
// TODO: If this actor is used to send large files, combine the summing and packetization into a loop with a yield() every 20k or so.
Void _ = wait(yield());
wait(yield());
MD5_CTX sum;
::MD5_Init(&sum);
@ -881,7 +881,7 @@ ACTOR Future<Void> writeEntireFile_impl(Reference<BlobStoreEndpoint> bstore, std
std::string contentMD5 = base64::encoder::from_string(sumBytes);
contentMD5.resize(contentMD5.size() - 1);
Void _ = wait(writeEntireFileFromBuffer_impl(bstore, bucket, object, &packets, content.size(), contentMD5));
wait(writeEntireFileFromBuffer_impl(bstore, bucket, object, &packets, content.size(), contentMD5));
return Void();
}
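
writeEntireFile_impl derives the Content-MD5 header value: a raw MD5 digest of the content, base64-encoded, with the encoder's trailing newline trimmed. A standalone sketch using the OpenSSL MD5 calls and bundled base64 encoder seen in this file (the MD5_Update/MD5_Final steps fall outside the hunks above and are reconstructed here as an assumption):

    MD5_CTX sum;
    ::MD5_Init( &sum );
    ::MD5_Update( &sum, content.data(), content.size() );
    std::string sumBytes( 16, '\0' );                  // an MD5 digest is 16 bytes
    ::MD5_Final( (unsigned char*)&sumBytes[0], &sum );
    std::string contentMD5 = base64::encoder::from_string( sumBytes );
    contentMD5.resize( contentMD5.size() - 1 );        // drop the encoder's trailing newline
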
@ -896,7 +896,7 @@ Future<Void> BlobStoreEndpoint::writeEntireFileFromBuffer(std::string const &buc
ACTOR Future<int> readObject_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, void *data, int length, int64_t offset) {
if(length <= 0)
return 0;
Void _ = wait(bstore->requestRateRead->getAllowance(1));
wait(bstore->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -916,7 +916,7 @@ Future<int> BlobStoreEndpoint::readObject(std::string const &bucket, std::string
}
ACTOR static Future<std::string> beginMultiPartUpload_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object) {
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
wait(bstore->requestRateWrite->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object + "?uploads";
HTTP::Headers headers;
@ -936,8 +936,8 @@ Future<std::string> BlobStoreEndpoint::beginMultiPartUpload(std::string const &b
}
ACTOR Future<std::string> uploadPart_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string uploadID, unsigned int partNumber, UnsentPacketQueue *pContent, int contentLen, std::string contentMD5) {
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
Void _ = wait(bstore->concurrentUploads.take());
wait(bstore->requestRateWrite->getAllowance(1));
wait(bstore->concurrentUploads.take());
state FlowLock::Releaser uploadReleaser(bstore->concurrentUploads, 1);
std::string resource = format("/%s/%s?partNumber=%d&uploadId=%s", bucket.c_str(), object.c_str(), partNumber, uploadID.c_str());
@ -966,7 +966,7 @@ Future<std::string> BlobStoreEndpoint::uploadPart(std::string const &bucket, std
ACTOR Future<Void> finishMultiPartUpload_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string uploadID, BlobStoreEndpoint::MultiPartSetT parts) {
state UnsentPacketQueue part_list(); // NonCopyable state var so must be declared at top of actor
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
wait(bstore->requestRateWrite->getAllowance(1));
std::string manifest = "<CompleteMultipartUpload>";
for(auto &p : parts)


@ -18,22 +18,22 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include "FailureMonitor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR Future<Void> waitForStateEqual( IFailureMonitor* monitor, Endpoint endpoint, FailureStatus status ) {
loop {
Future<Void> change = monitor->onStateChanged(endpoint);
if (monitor->getState(endpoint) == status)
return Void();
Void _ = wait( change );
wait( change );
}
}
ACTOR Future<Void> waitForContinuousFailure( IFailureMonitor* monitor, Endpoint endpoint, double sustainedFailureDuration, double slope ) {
state double startT = now();
loop {
Void _ = wait( monitor->onFailed( endpoint ) );
wait( monitor->onFailed( endpoint ) );
if(monitor->permanentlyFailed(endpoint))
return Void();
@ -44,8 +44,8 @@ ACTOR Future<Void> waitForContinuousFailure( IFailureMonitor* monitor, Endpoint
if(waitDelay < std::min(FLOW_KNOBS->CLIENT_REQUEST_INTERVAL, FLOW_KNOBS->SERVER_REQUEST_INTERVAL)) //We will not get a failure monitoring update in this amount of time, so there is no point in waiting for changes
waitDelay = 0;
choose {
when (Void _ = wait( monitor->onStateEqual( endpoint, FailureStatus(false) ) )) {} // SOMEDAY: Use onStateChanged() for efficiency
when (Void _ = wait( delay(waitDelay) )) {
when (wait( monitor->onStateEqual( endpoint, FailureStatus(false) ) )) {} // SOMEDAY: Use onStateChanged() for efficiency
when (wait( delay(waitDelay) )) {
return Void();
}
}
@ -92,7 +92,7 @@ void SimpleFailureMonitor::setStatus( NetworkAddress const& address, FailureStat
void SimpleFailureMonitor::endpointNotFound( Endpoint const& endpoint ) {
// SOMEDAY: Expiration (this "leaks" memory)
TraceEvent("EndpointNotFound").detail("Address", endpoint.address).detail("Token", endpoint.token).suppressFor(1.0);
TraceEvent("EndpointNotFound").suppressFor(1.0).detail("Address", endpoint.address).detail("Token", endpoint.token);
endpointKnownFailed.set( endpoint, true );
}


@ -20,12 +20,12 @@
// Unit tests for the flow language and libraries
#include "flow/actorcompiler.h"
#include "flow/UnitTest.h"
#include "flow/DeterministicRandom.h"
#include "flow/IThreadPool.h"
#include "fdbrpc.h"
#include "IAsyncFile.h"
#include "flow/actorcompiler.h" // This must be the last #include.
void forceLinkFlowTests() {}
@ -75,17 +75,17 @@ ACTOR static Future<Void> emptyActor() {
}
ACTOR static void oneWaitVoidActor(Future<Void> f) {
Void _ = wait(f);
wait(f);
}
ACTOR static Future<Void> oneWaitActor(Future<Void> f) {
Void _ = wait(f);
wait(f);
return Void();
}
Future<Void> g_cheese;
ACTOR static Future<Void> cheeseWaitActor() {
Void _ = wait(g_cheese);
wait(g_cheese);
return Void();
}
@ -109,8 +109,8 @@ ACTOR static Future<int> addOneActor(Future<int> in) {
ACTOR static Future<Void> chooseTwoActor(Future<Void> f, Future<Void> g) {
choose{
when(Void _ = wait(f)) {}
when(Void _ = wait(g)) {}
when(wait(f)) {}
when(wait(g)) {}
}
return Void();
}
@ -209,11 +209,11 @@ ACTOR static Future<NonserializableThing> testNonserializableThing() {
ACTOR Future<Void> testCancelled(bool *exits, Future<Void> f) {
try {
Void _ = wait(Future<Void>(Never()));
wait(Future<Void>(Never()));
} catch( Error &e ) {
state Error err = e;
try {
Void _ = wait(Future<Void>(Never()));
wait(Future<Void>(Never()));
} catch( Error &e ) {
*exits = true;
throw;
@ -241,7 +241,7 @@ TEST_CASE("flow/flow/cancel1")
ACTOR static Future<Void> noteCancel(int* cancelled) {
*cancelled = 0;
try {
Void _ = wait(Future<Void>(Never()));
wait(Future<Void>(Never()));
throw internal_error();
}
catch (...) {
@ -1069,7 +1069,7 @@ TEST_CASE("flow/flow/YieldedAsyncMap/randomized")
state int it;
for (it = 0; it < 100000; it++) {
yamr.randomOp();
Void _ = wait(yield());
wait(yield());
}
return Void();
}
@ -1080,7 +1080,7 @@ TEST_CASE("flow/flow/AsyncMap/randomized")
state int it;
for (it = 0; it < 100000; it++) {
yamr.randomOp();
Void _ = wait(yield());
wait(yield());
}
return Void();
}
@ -1098,11 +1098,11 @@ TEST_CASE("flow/flow/YieldedAsyncMap/basic")
//yam.triggerRange(0, 4);
state Future<Void> y2 = yam.onChange(1);
Void _ = wait(reportErrors(y0, "Y0"));
Void _ = wait(reportErrors(y1, "Y1"));
Void _ = wait(reportErrors(y1a, "Y1a"));
Void _ = wait(reportErrors(y1b, "Y1b"));
Void _ = wait(reportErrors(timeout(y2, 5, Void()), "Y2"));
wait(reportErrors(y0, "Y0"));
wait(reportErrors(y1, "Y1"));
wait(reportErrors(y1a, "Y1a"));
wait(reportErrors(y1b, "Y1b"));
wait(reportErrors(timeout(y2, 5, Void()), "Y2"));
return Void();
}
@ -1147,7 +1147,7 @@ TEST_CASE("flow/flow/YieldedAsyncMap/cancel2")
},
delay(1)));
Void _ = wait(y1);
wait(y1);
printf("Got y1\n");
y2.cancel();
@ -1175,9 +1175,9 @@ TEST_CASE("flow/flow/AsyncVar/basic")
ACTOR static Future<Void> waitAfterCancel( int* output ) {
*output = 0;
try {
Void _ = wait( Never() );
wait( Never() );
} catch (...) {
Void _ = wait( (*output=1, Future<Void>(Void())) );
wait( (*output=1, Future<Void>(Void())) );
}
ASSERT(false);
return Void();


@ -18,6 +18,7 @@
* limitations under the License.
*/
#include "flow/flow.h"
#include "FlowTransport.h"
#include "genericactors.actor.h"
#include "fdbrpc.h"
@ -31,6 +32,7 @@
#if VALGRIND
#include <memcheck.h>
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
static NetworkAddress g_currentDeliveryPeerAddress;
@ -148,6 +150,8 @@ public:
numIncompatibleConnections(0)
{}
~TransportData();
void initMetrics() {
bytesSent.init(LiteralStringRef("Net2.BytesSent"));
countPacketsReceived.init(LiteralStringRef("Net2.CountPacketsReceived"));
@ -227,9 +231,10 @@ struct Peer : NonCopyable {
double lastConnectTime;
double reconnectionDelay;
int peerReferences;
bool incompatibleProtocolVersionNewer;
explicit Peer( TransportData* transport, NetworkAddress const& destination )
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), peerReferences(-1)
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), incompatibleProtocolVersionNewer(false), peerReferences(-1)
{
connect = connectionKeeper(this);
}
@ -280,6 +285,7 @@ struct Peer : NonCopyable {
if ( !destination.isPublic() || outgoingConnectionIdle || destination > transport->localAddress ) {
// Keep the new connection
TraceEvent("IncomingConnection", conn->getDebugID())
.suppressFor(1.0)
.detail("FromAddr", conn->getPeerAddress())
.detail("CanonicalAddr", destination)
.detail("IsPublic", destination.isPublic());
@ -289,6 +295,7 @@ struct Peer : NonCopyable {
connect = connectionKeeper( this, conn, reader );
} else {
TraceEvent("RedundantConnection", conn->getDebugID())
.suppressFor(1.0)
.detail("FromAddr", conn->getPeerAddress().toString())
.detail("CanonicalAddr", destination);
@ -307,11 +314,10 @@ struct Peer : NonCopyable {
loop {
if(peer->peerReferences == 0 && peer->reliable.empty() && peer->unsent.empty()) {
//FIXME: closing connections is causing client connection issues
//throw connection_failed();
throw connection_failed();
}
Void _ = wait( delayJittered( FLOW_KNOBS->CONNECTION_MONITOR_LOOP_TIME ) );
wait( delayJittered( FLOW_KNOBS->CONNECTION_MONITOR_LOOP_TIME ) );
// SOMEDAY: Stop monitoring and close the connection after a long period of inactivity with no reliable or onDisconnect requests outstanding
@ -319,9 +325,9 @@ struct Peer : NonCopyable {
FlowTransport::transport().sendUnreliable( SerializeSource<ReplyPromise<Void>>(reply), remotePing.getEndpoint() );
choose {
when (Void _ = wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) { TraceEvent("ConnectionTimeout").detail("WithAddr", peer->destination); throw connection_failed(); }
when (Void _ = wait( reply.getFuture() )) {}
when (Void _ = wait( peer->incompatibleDataRead.onTrigger())) {}
when (wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) { TraceEvent("ConnectionTimeout").suppressFor(1.0).detail("WithAddr", peer->destination); throw connection_failed(); }
when (wait( reply.getFuture() )) {}
when (wait( peer->incompatibleDataRead.onTrigger())) {}
}
}
}
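
connectionMonitor is the transport's keepalive: wake on a jittered interval, fire an unreliable ping, and treat CONNECTION_MONITOR_TIMEOUT of silence as a dead connection; an incompatible-data read also counts as proof of life. Condensed, with remotePing and peer as in the surrounding actor:

    loop {
        wait( delayJittered( FLOW_KNOBS->CONNECTION_MONITOR_LOOP_TIME ) );
        state ReplyPromise<Void> reply;
        FlowTransport::transport().sendUnreliable( SerializeSource<ReplyPromise<Void>>(reply), remotePing.getEndpoint() );
        choose {
            when( wait( reply.getFuture() ) ) {}                         // pong arrived
            when( wait( peer->incompatibleDataRead.onTrigger() ) ) {}    // traffic seen
            when( wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) ) ) {
                throw connection_failed();                               // silence: give up
            }
        }
    }
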
@ -329,10 +335,10 @@ struct Peer : NonCopyable {
ACTOR static Future<Void> connectionWriter( Peer* self, Reference<IConnection> conn ) {
state double lastWriteTime = now();
loop {
//Void _ = wait( delay(0, TaskWriteSocket) );
Void _ = wait( delayJittered(std::max<double>(FLOW_KNOBS->MIN_COALESCE_DELAY, FLOW_KNOBS->MAX_COALESCE_DELAY - (now() - lastWriteTime)), TaskWriteSocket) );
//Void _ = wait( delay(500e-6, TaskWriteSocket) );
//Void _ = wait( yield(TaskWriteSocket) );
//wait( delay(0, TaskWriteSocket) );
wait( delayJittered(std::max<double>(FLOW_KNOBS->MIN_COALESCE_DELAY, FLOW_KNOBS->MAX_COALESCE_DELAY - (now() - lastWriteTime)), TaskWriteSocket) );
//wait( delay(500e-6, TaskWriteSocket) );
//wait( yield(TaskWriteSocket) );
// Send until there is nothing left to send
loop {
@ -346,13 +352,13 @@ struct Peer : NonCopyable {
if (self->unsent.empty()) break;
TEST(true); // We didn't write everything, so apparently the write buffer is full. Wait for it to be nonfull.
Void _ = wait( conn->onWritable() );
Void _ = wait( yield(TaskWriteSocket) );
wait( conn->onWritable() );
wait( yield(TaskWriteSocket) );
}
// Wait until there is something to send
while ( self->unsent.empty() )
Void _ = wait( self->dataToSend.onTrigger() );
wait( self->dataToSend.onTrigger() );
}
}
@ -368,20 +374,20 @@ struct Peer : NonCopyable {
self->outgoingConnectionIdle = true;
// Wait until there is something to send
while ( self->unsent.empty() )
Void _ = wait( self->dataToSend.onTrigger() );
wait( self->dataToSend.onTrigger() );
ASSERT( self->destination.isPublic() );
self->outgoingConnectionIdle = false;
Void _ = wait( delayJittered( std::max(0.0, self->lastConnectTime+self->reconnectionDelay - now()) ) ); // Don't connect() to the same peer more than once per 2 sec
wait( delayJittered( std::max(0.0, self->lastConnectTime+self->reconnectionDelay - now()) ) ); // Don't connect() to the same peer more than once per 2 sec
self->lastConnectTime = now();
TraceEvent("ConnectingTo", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).suppressFor(1.0);
TraceEvent("ConnectingTo", conn ? conn->getDebugID() : UID()).suppressFor(1.0).detail("PeerAddr", self->destination);
Reference<IConnection> _conn = wait( timeout( INetworkConnections::net()->connect(self->destination), FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT, Reference<IConnection>() ) );
if (_conn) {
conn = _conn;
TraceEvent("ConnectionExchangingConnectPacket", conn->getDebugID()).detail("PeerAddr", self->destination).suppressFor(1.0);
TraceEvent("ConnectionExchangingConnectPacket", conn->getDebugID()).suppressFor(1.0).detail("PeerAddr", self->destination);
self->prependConnectPacket();
} else {
TraceEvent("ConnectionTimedOut", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).suppressFor(1.0);
TraceEvent("ConnectionTimedOut", conn ? conn->getDebugID() : UID()).suppressFor(1.0).detail("PeerAddr", self->destination);
throw connection_failed();
}
@ -392,7 +398,7 @@ struct Peer : NonCopyable {
try {
self->transport->countConnEstablished++;
Void _ = wait( connectionWriter( self, conn ) || reader || connectionMonitor(self) );
wait( connectionWriter( self, conn ) || reader || connectionMonitor(self) );
} catch (Error& e) {
if (e.code() == error_code_connection_failed || e.code() == error_code_actor_cancelled || ( g_network->isSimulated() && e.code() == error_code_checksum_failed ))
self->transport->countConnClosedWithoutError++;
@ -413,10 +419,10 @@ struct Peer : NonCopyable {
bool ok = e.code() == error_code_connection_failed || e.code() == error_code_actor_cancelled || ( g_network->isSimulated() && e.code() == error_code_checksum_failed );
if(self->compatible) {
TraceEvent(ok ? SevInfo : SevWarnAlways, "ConnectionClosed", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).error(e, true).suppressFor(1.0);
TraceEvent(ok ? SevInfo : SevWarnAlways, "ConnectionClosed", conn ? conn->getDebugID() : UID()).error(e, true).suppressFor(1.0).detail("PeerAddr", self->destination);
}
else {
TraceEvent(ok ? SevInfo : SevWarnAlways, "IncompatibleConnectionClosed", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).error(e, true);
TraceEvent(ok ? SevInfo : SevWarnAlways, "IncompatibleConnectionClosed", conn ? conn->getDebugID() : UID()).error(e, true).detail("PeerAddr", self->destination);
}
if (conn) {
@ -428,21 +434,28 @@ struct Peer : NonCopyable {
// Try to recover, even from serious errors, by retrying
if(self->peerReferences <= 0 && self->reliable.empty() && self->unsent.empty()) {
//FIXME: closing connections is causing client connection issues
//self->connect.cancel();
//self->transport->peers.erase(self->destination);
//delete self;
//return Void();
TraceEvent("PeerDestroy").error(e).suppressFor(1.0).detail("PeerAddr", self->destination);
self->connect.cancel();
self->transport->peers.erase(self->destination);
delete self;
return Void();
}
}
}
}
};
TransportData::~TransportData() {
for(auto &p : peers) {
p.second->connect.cancel();
delete p.second;
}
}
ACTOR static void deliver( TransportData* self, Endpoint destination, ArenaReader reader, bool inReadSocket ) {
int priority = self->endpoints.getPriority(destination.token);
if (priority < TaskReadSocket || !inReadSocket) {
Void _ = wait( delay(0, priority) );
wait( delay(0, priority) );
} else {
g_network->setCurrentTask( priority );
}
@ -550,10 +563,10 @@ static void scanPackets( TransportData* transport, uint8_t*& unprocessed_begin,
if (packetLen > FLOW_KNOBS->PACKET_WARNING) {
TraceEvent(transport->warnAlwaysForLargePacket ? SevWarnAlways : SevWarn, "Net2_LargePacket")
.suppressFor(1.0)
.detail("FromPeer", peerAddress.toString())
.detail("Length", (int)packetLen)
.detail("Token", token)
.suppressFor(1.0);
.detail("Token", token);
if(g_network->isSimulated())
transport->warnAlwaysForLargePacket = false;
@ -581,6 +594,7 @@ ACTOR static Future<Void> connectionReader(
state bool expectConnectPacket = true;
state bool compatible = false;
state bool incompatiblePeerCounted = false;
state bool incompatibleProtocolVersionNewer = false;
state NetworkAddress peerAddress;
state uint64_t peerProtocolVersion = 0;
@ -621,7 +635,8 @@ ACTOR static Future<Void> connectionReader(
connectionId = p->connectionId;
}
if( (p->protocolVersion&compatibleProtocolVersionMask) != (currentProtocolVersion&compatibleProtocolVersionMask) ) {
if( (p->protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) {
incompatibleProtocolVersionNewer = p->protocolVersion > currentProtocolVersion;
NetworkAddress addr = p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress();
if(connectionId != 1) addr.port = 0;
@ -652,8 +667,9 @@ ACTOR static Future<Void> connectionReader(
else {
compatible = true;
TraceEvent("ConnectionEstablished", conn->getDebugID())
.suppressFor(1.0)
.detail("Peer", conn->getPeerAddress())
.detail("ConnectionId", connectionId).suppressFor(1.0);
.detail("ConnectionId", connectionId);
}
if(connectionId > 1) {
@ -665,8 +681,9 @@ ACTOR static Future<Void> connectionReader(
peerProtocolVersion = p->protocolVersion;
if (peer != nullptr) {
// Outgoing connection; port information should be what we expect
TraceEvent("ConnectedOutgoing").detail("PeerAddr", NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) ).suppressFor(1.0);
TraceEvent("ConnectedOutgoing").suppressFor(1.0).detail("PeerAddr", NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) );
peer->compatible = compatible;
peer->incompatibleProtocolVersionNewer = incompatibleProtocolVersionNewer;
if (!compatible) {
peer->transport->numIncompatibleConnections++;
incompatiblePeerCounted = true;
@ -678,12 +695,13 @@ ACTOR static Future<Void> connectionReader(
}
peer = transport->getPeer(peerAddress);
peer->compatible = compatible;
peer->incompatibleProtocolVersionNewer = incompatibleProtocolVersionNewer;
if (!compatible) {
peer->transport->numIncompatibleConnections++;
incompatiblePeerCounted = true;
}
onConnected.send( peer );
Void _ = wait( delay(0) ); // Check for cancellation
wait( delay(0) ); // Check for cancellation
}
}
}
@ -698,11 +716,11 @@ ACTOR static Future<Void> connectionReader(
if (readWillBlock)
break;
Void _ = wait(yield(TaskReadSocket));
wait(yield(TaskReadSocket));
}
Void _ = wait( conn->onReadable() );
Void _ = wait(delay(0, TaskReadSocket)); // We don't want to call conn->read directly from the reactor - we could get stuck in the reactor reading 1 packet at a time
wait( conn->onReadable() );
wait(delay(0, TaskReadSocket)); // We don't want to call conn->read directly from the reactor - we could get stuck in the reactor reading 1 packet at a time
}
}
catch (Error& e) {
@ -719,18 +737,18 @@ ACTOR static Future<Void> connectionIncoming( TransportData* self, Reference<ICo
state Promise<Peer*> onConnected;
state Future<Void> reader = connectionReader( self, conn, nullptr, onConnected );
choose {
when( Void _ = wait( reader ) ) { ASSERT(false); return Void(); }
when( wait( reader ) ) { ASSERT(false); return Void(); }
when( Peer *p = wait( onConnected.getFuture() ) ) {
p->onIncomingConnection( conn, reader );
}
when( Void _ = wait( delayJittered(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT) ) ) {
when( wait( delayJittered(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT) ) ) {
TEST(true); // Incoming connection timed out
throw timed_out();
}
}
return Void();
} catch (Error& e) {
TraceEvent("IncomingConnectionError", conn->getDebugID()).error(e).detail("FromAddress", conn->getPeerAddress()).suppressFor(1.0);
TraceEvent("IncomingConnectionError", conn->getDebugID()).error(e).suppressFor(1.0).detail("FromAddress", conn->getPeerAddress());
conn->close();
return Void();
}
@ -742,8 +760,9 @@ ACTOR static Future<Void> listen( TransportData* self, NetworkAddress listenAddr
try {
loop {
Reference<IConnection> conn = wait( listener->accept() );
TraceEvent("ConnectionFrom", conn->getDebugID()).detail("FromAddress", conn->getPeerAddress()).suppressFor(1.0);
TraceEvent("ConnectionFrom", conn->getDebugID()).suppressFor(1.0).detail("FromAddress", conn->getPeerAddress());
incoming.add( connectionIncoming(self, conn) );
wait(delay(0) || delay(FLOW_KNOBS->CONNECTION_ACCEPT_DELAY, TaskWriteSocket));
}
} catch (Error& e) {
TraceEvent(SevError, "ListenError").error(e);
@ -766,7 +785,7 @@ Peer* TransportData::getPeer( NetworkAddress const& address, bool openConnection
ACTOR static Future<Void> multiVersionCleanupWorker( TransportData* self ) {
loop {
Void _ = wait(delay(FLOW_KNOBS->CONNECTION_CLEANUP_DELAY));
wait(delay(FLOW_KNOBS->CONNECTION_CLEANUP_DELAY));
for(auto it = self->incompatiblePeers.begin(); it != self->incompatiblePeers.end();) {
if( self->multiVersionConnections.count(it->second.first) ) {
it = self->incompatiblePeers.erase(it);
@ -895,7 +914,7 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
Peer* peer = self->getPeer(destination.address, openConnection);
// If there isn't an open connection, a public address, or the peer isn't compatible, we can't send
if (!peer || (peer->outgoingConnectionIdle && !destination.address.isPublic()) || (!peer->compatible && destination.token != WLTOKEN_PING_PACKET)) {
if (!peer || (peer->outgoingConnectionIdle && !destination.address.isPublic()) || (peer->incompatibleProtocolVersionNewer && destination.token != WLTOKEN_PING_PACKET)) {
TEST(true); // Can't send to private address without a compatible open connection
return (PacketID)NULL;
}
@ -956,11 +975,11 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
}
else if (len > FLOW_KNOBS->PACKET_WARNING) {
TraceEvent(self->warnAlwaysForLargePacket ? SevWarnAlways : SevWarn, "Net2_LargePacket")
.suppressFor(1.0)
.detail("ToPeer", destination.address)
.detail("Length", (int)len)
.detail("Token", destination.token)
.backtrace()
.suppressFor(1.0);
.backtrace();
if(g_network->isSimulated())
self->warnAlwaysForLargePacket = false;


@ -101,8 +101,8 @@ namespace HTTP {
ACTOR Future<int> read_into_string(Reference<IConnection> conn, std::string *buf, int maxlen) {
loop {
// Wait for connection to have something to read
Void _ = wait(conn->onReadable());
Void _ = wait( delay( 0, TaskReadSocket ) );
wait(conn->onReadable());
wait( delay( 0, TaskReadSocket ) );
// Read into buffer
int originalSize = buf->size();
@ -203,7 +203,7 @@ namespace HTTP {
// Read headers
r->headers.clear();
Void _ = wait(read_http_response_headers(conn, &r->headers, &buf, &pos));
wait(read_http_response_headers(conn, &r->headers, &buf, &pos));
auto i = r->headers.find("Content-Length");
if(i != r->headers.end())
@ -230,7 +230,7 @@ namespace HTTP {
pos = 0;
// Read until there are at least contentLen bytes available at pos
Void _ = wait(read_fixed_into_string(conn, r->contentLen, &r->content, pos));
wait(read_fixed_into_string(conn, r->contentLen, &r->content, pos));
// There shouldn't be any bytes after content.
if(r->content.size() != r->contentLen)
@ -256,7 +256,7 @@ namespace HTTP {
break;
// Read (if needed) until chunkLen bytes are available at pos, then advance pos by chunkLen
Void _ = wait(read_fixed_into_string(conn, chunkLen, &r->content, pos));
wait(read_fixed_into_string(conn, chunkLen, &r->content, pos));
pos += chunkLen;
// Read the final empty line at the end of the chunk (the required "\r\n" after the chunk bytes)
@ -272,7 +272,7 @@ namespace HTTP {
r->contentLen = pos;
// Next is the post-chunk header block, so read that.
Void _ = wait(read_http_response_headers(conn, &r->headers, &r->content, &pos));
wait(read_http_response_headers(conn, &r->headers, &r->content, &pos));
// If the header parsing did not consume all of the buffer then something is wrong
if(pos != r->content.size())
@ -330,8 +330,8 @@ namespace HTTP {
state double send_start = timer();
loop {
Void _ = wait(conn->onWritable());
Void _ = wait( delay( 0, TaskWriteSocket ) );
wait(conn->onWritable());
wait( delay( 0, TaskWriteSocket ) );
// If we already got a response, before finishing sending the request, then close the connection,
// set the Connection header to "close" as a hint to the caller that this connection can't be used
@ -344,7 +344,7 @@ namespace HTTP {
}
state int trySend = CLIENT_KNOBS->HTTP_SEND_SIZE;
Void _ = wait(sendRate->getAllowance(trySend));
wait(sendRate->getAllowance(trySend));
int len = conn->write(pContent->getUnsent(), trySend);
if(pSent != nullptr)
*pSent += len;
@ -355,7 +355,7 @@ namespace HTTP {
break;
}
Void _ = wait(responseReading);
wait(responseReading);
double elapsed = timer() - send_start;
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)


@ -37,9 +37,9 @@ ACTOR static Future<Void> zeroRangeHelper( Reference<IAsyncFile> f, int64_t offs
while(pos < offset+length) {
state int len = std::min<int64_t>( ONE_MEGABYTE, offset+length-pos );
Void _ = wait( f->write( zeros, len, pos ) );
wait( f->write( zeros, len, pos ) );
pos += len;
Void _ = wait( yield() );
wait( yield() );
}
aligned_free(zeros);
@ -59,14 +59,14 @@ TEST_CASE( "fileio/zero" ) {
0));
// Verify that we can grow a file with zero().
Void _ = wait(f->sync());
Void _ = wait(f->zeroRange(0, ONE_MEGABYTE));
wait(f->sync());
wait(f->zeroRange(0, ONE_MEGABYTE));
int64_t size = wait(f->size());
ASSERT( ONE_MEGABYTE == size );
// Verify that zero() does, in fact, zero.
Void _ = wait(zeroRangeHelper(f, 0, ONE_MEGABYTE, 0xff));
Void _ = wait(f->zeroRange(0, ONE_MEGABYTE));
wait(zeroRangeHelper(f, 0, ONE_MEGABYTE, 0xff));
wait(f->zeroRange(0, ONE_MEGABYTE));
state uint8_t* page = (uint8_t*)malloc(FOUR_KILOBYTES);
int n = wait( f->read(page, FOUR_KILOBYTES, 0) );
ASSERT( n == FOUR_KILOBYTES );
@ -77,7 +77,7 @@ TEST_CASE( "fileio/zero" ) {
// Destruct our file and remove it.
f.clear();
Void _ = wait( IAsyncFileSystem::filesystem()->deleteFile(filename, true) );
wait( IAsyncFileSystem::filesystem()->deleteFile(filename, true) );
return Void();
}
@ -94,13 +94,13 @@ ACTOR static Future<Void> incrementalDeleteHelper( std::string filename, bool mu
remainingFileSize = fileSize;
}
Void _ = wait(IAsyncFileSystem::filesystem()->deleteFile(filename, mustBeDurable));
wait(IAsyncFileSystem::filesystem()->deleteFile(filename, mustBeDurable));
if(exists) {
for( ; remainingFileSize > 0; remainingFileSize -= truncateAmt ){
Void _ = wait(file->truncate(remainingFileSize));
Void _ = wait(file->sync());
Void _ = wait(delay(interval));
wait(file->truncate(remainingFileSize));
wait(file->sync());
wait(delay(interval));
}
}
@ -124,10 +124,10 @@ TEST_CASE( "fileio/incrementalDelete" ) {
filename,
IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_READWRITE,
0));
Void _ = wait(f->sync());
Void _ = wait(f->truncate(fileSize));
wait(f->sync());
wait(f->truncate(fileSize));
//Close the file by dropping the reference
f.clear();
Void _ = wait(IAsyncFileSystem::filesystem()->incrementalDeleteFile(filename, true));
wait(IAsyncFileSystem::filesystem()->incrementalDeleteFile(filename, true));
return Void();
}


@ -22,6 +22,7 @@
#define FLOW_IASYNCFILE_H
#pragma once
#include <ctime>
#include "flow/flow.h"
// All outstanding operations must be cancelled before the destructor of IAsyncFile is called.
@ -96,6 +97,9 @@ public:
// If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
virtual Future<Void> incrementalDeleteFile( std::string filename, bool mustBeDurable );
// Returns the time of the last modification of the file.
virtual Future<std::time_t> lastWriteTime( std::string filename ) = 0;
static IAsyncFileSystem* filesystem() { return filesystem(g_network); }
static runCycleFuncPtr runCycleFunc() { return reinterpret_cast<runCycleFuncPtr>(reinterpret_cast<flowGlobalType>(g_network->global(INetwork::enRunCycleFunc))); }


@ -35,6 +35,7 @@
#include "Locality.h"
#include "QueueModel.h"
#include "MultiInterface.h"
#include "flow/actorcompiler.h" // This must be the last #include.
using std::vector;
@ -116,7 +117,7 @@ bool checkAndProcessResult(ErrorOr<T> result, Reference<ModelHolder> holder, boo
ACTOR template <class Request>
Future<Optional<REPLY_TYPE(Request)>> makeRequest(RequestStream<Request> const* stream, Request request, double backoff, Future<Void> requestUnneeded, QueueModel *model, bool isFirstRequest, bool atMostOnce, bool triedAllOptions) {
if(backoff > 0.0) {
Void _ = wait(delay(backoff) || requestUnneeded);
wait(delay(backoff) || requestUnneeded);
}
if(requestUnneeded.isReady()) {
@ -301,8 +302,8 @@ Future< REPLY_TYPE(Request) > loadBalance(
for(int i=0; i<ok.size(); i++)
ok[i] = IFailureMonitor::failureMonitor().onStateEqual( alternatives->get(i, channel).getEndpoint(), FailureStatus(false) );
choose {
when ( Void _ = wait( quorum( ok, 1 ) ) ) {}
when ( Void _ = wait( ::delayJittered( delay ) ) ) {
when ( wait( quorum( ok, 1 ) ) ) {}
when ( wait( ::delayJittered( delay ) ) ) {
throw all_alternatives_failed();
}
}
@ -385,7 +386,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
firstRequestEndpoint = Optional<uint64_t>();
break;
}
when(Void _ = wait(secondDelay)) {
when(wait(secondDelay)) {
secondDelay = Never();
if(model && model->secondBudget >= 1.0) {
model->secondMultiplier += FLOW_KNOBS->SECOND_REQUEST_MULTIPLIER_GROWTH;
@ -421,4 +422,6 @@ inline Future< REPLY_TYPE(Request) > loadBalance(
return loadBalance( Reference<MultiInterface<Interface>>(alternatives), channel, request, taskID, atMostOnce, model );
}
#include "flow/unactorcompiler.h"
#endif
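Editor's sketch of the race pattern used above, where the first failure-monitor recovery is raced against a jittered timeout (flow-style; function and parameter names are hypothetical):
ACTOR Future<bool> anyReadyBeforeTimeout( std::vector<Future<Void>> ok, double timeoutSeconds ) {
	state bool recovered = false;
	choose {
		when ( wait( quorum( ok, 1 ) ) ) { recovered = true; }                  // some alternative recovered
		when ( wait( ::delayJittered( timeoutSeconds ) ) ) { recovered = false; } // gave up waiting
	}
	return recovered;
}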

View File

@ -74,6 +74,10 @@ Future< Void > Net2FileSystem::deleteFile( std::string filename, bool mustBeDura
return Net2AsyncFile::deleteFile(filename, mustBeDurable);
}
Future< std::time_t > Net2FileSystem::lastWriteTime( std::string filename ) {
return Net2AsyncFile::lastWriteTime( filename );
}
void Net2FileSystem::newFileSystem(double ioTimeout, std::string fileSystemPath)
{
g_network->setGlobal(INetwork::enFileSystem, (flowGlobalType) new Net2FileSystem(ioTimeout, fileSystemPath));

View File

@ -26,11 +26,14 @@
class Net2FileSystem : public IAsyncFileSystem {
public:
virtual Future< Reference<class IAsyncFile> > open( std::string filename, int64_t flags, int64_t mode );
// Opens a file for asynchronous I/O
virtual Future< Reference<class IAsyncFile> > open( std::string filename, int64_t flags, int64_t mode );
virtual Future< Void > deleteFile( std::string filename, bool mustBeDurable );
// Deletes the given file. If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
virtual Future< Void > deleteFile( std::string filename, bool mustBeDurable );
// Returns the time of the last modification of the file.
virtual Future< std::time_t > lastWriteTime( std::string filename );
//void init();

View File

@ -84,7 +84,7 @@ bool IReplicationPolicy::validateFull(
auto missingEntry = totalSolution[lastSolutionIndex];
totalSolution[lastSolutionIndex] = totalSolution.back();
totalSolution.pop_back();
for (int index = 0; index < solutionSet.size(); index ++) {
for (int index = 0; index < solutionSet.size() && index < totalSolution.size(); index ++) {
if (g_replicationdebug > 3) {
auto fromServer = fromServers->getRecordViaEntry(missingEntry);
printf("Test remove entry: %s test:%3d of%3lu\n", fromServers->getEntryInfo(missingEntry).c_str(), index+1, solutionSet.size());

View File

@ -18,15 +18,16 @@
* limitations under the License.
*/
#include "flow/actorcompiler.h"
#include <memory>
#include "flow/flow.h"
#include "flow/network.h"
#include "flow/Knobs.h"
#include "TLSConnection.h"
#include "ITLSPlugin.h"
#include "LoadPlugin.h"
#include "Platform.h"
#include <memory>
#include "IAsyncFile.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Name of specialized TLS Plugin
const char* tlsPluginName = "fdb-libressl-plugin";
@ -78,11 +79,10 @@ ACTOR static Future<Void> handshake( TLSConnection* self ) {
throw connection_failed();
}
ASSERT( r == ITLSSession::WANT_WRITE || r == ITLSSession::WANT_READ );
Void _ = wait( r == ITLSSession::WANT_WRITE ? self->conn->onWritable() : self->conn->onReadable() );
wait( r == ITLSSession::WANT_WRITE ? self->conn->onWritable() : self->conn->onReadable() );
}
TraceEvent("TLSConnectionHandshakeSuccessful", self->getDebugID())
.detail("Peer", self->getPeerAddress());
TraceEvent("TLSConnectionHandshakeSuccessful", self->getDebugID()).suppressFor(1.0).detail("Peer", self->getPeerAddress());
return Void();
}
@ -161,7 +161,7 @@ ACTOR Future<Reference<IConnection>> wrap( Reference<ITLSPolicy> policy, bool is
}
Future<Reference<IConnection>> TLSListener::accept() {
return wrap( policy, false, listener->accept(), "");
return wrap( options->get_policy(TLSOptions::POLICY_VERIFY_PEERS), false, listener->accept(), "");
}
TLSNetworkConnections::TLSNetworkConnections( Reference<TLSOptions> options ) : options(options) {
@ -172,7 +172,11 @@ TLSNetworkConnections::TLSNetworkConnections( Reference<TLSOptions> options ) :
Future<Reference<IConnection>> TLSNetworkConnections::connect( NetworkAddress toAddr, std::string host) {
if ( toAddr.isTLS() ) {
NetworkAddress clearAddr( toAddr.ip, toAddr.port, toAddr.isPublic(), false );
TraceEvent("TLSConnectionConnecting").detail("ToAddr", toAddr);
TraceEvent("TLSConnectionConnecting").suppressFor(1.0).detail("ToAddr", toAddr);
// For FDB<->FDB connections, we don't have hostnames and can't verify IP
// addresses against certificates, so we have our own peer verifying logic
// to use. For FDB<->external system connections, we can use the standard
// hostname-based certificate verification logic.
if (host.empty() || host == toIPString(toAddr.ip))
return wrap(options->get_policy(TLSOptions::POLICY_VERIFY_PEERS), true, network->connect(clearAddr), std::string(""));
else
@ -189,7 +193,7 @@ Reference<IListener> TLSNetworkConnections::listen( NetworkAddress localAddr ) {
if ( localAddr.isTLS() ) {
NetworkAddress clearAddr( localAddr.ip, localAddr.port, localAddr.isPublic(), false );
TraceEvent("TLSConnectionListening").detail("OnAddr", localAddr);
return Reference<IListener>(new TLSListener( options->get_policy(TLSOptions::POLICY_VERIFY_PEERS), network->listen( clearAddr ) ));
return Reference<IListener>(new TLSListener( options, network->listen( clearAddr ) ));
}
return network->listen( localAddr );
}
@ -200,6 +204,7 @@ Reference<IListener> TLSNetworkConnections::listen( NetworkAddress localAddr ) {
void TLSOptions::set_cert_file( std::string const& cert_file ) {
try {
TraceEvent("TLSConnectionSettingCertFile").detail("CertFilePath", cert_file);
policyInfo.cert_path = cert_file;
set_cert_data( readFileBytes( cert_file, CERT_FILE_MAX_SIZE ) );
} catch ( Error& ) {
TraceEvent(SevError, "TLSOptionsSetCertFileError").detail("Filename", cert_file);
@ -210,6 +215,7 @@ void TLSOptions::set_cert_file( std::string const& cert_file ) {
void TLSOptions::set_ca_file(std::string const& ca_file) {
try {
TraceEvent("TLSConnectionSettingCAFile").detail("CAPath", ca_file);
policyInfo.ca_path = ca_file;
set_ca_data(readFileBytes(ca_file, CERT_FILE_MAX_SIZE));
}
catch (Error&) {
@ -219,26 +225,28 @@ void TLSOptions::set_ca_file(std::string const& ca_file) {
}
void TLSOptions::set_ca_data(std::string const& ca_data) {
if (!policyVerifyPeersSet || !policyVerifyPeersNotSet)
if (!policyVerifyPeersSet.get() || !policyVerifyPeersNotSet.get())
init_plugin();
TraceEvent("TLSConnectionSettingCAData").detail("CADataSize", ca_data.size());
if (!policyVerifyPeersSet->set_ca_data((const uint8_t*)&ca_data[0], ca_data.size()))
policyInfo.ca_contents = Standalone<StringRef>(ca_data);
if (!policyVerifyPeersSet.get()->set_ca_data((const uint8_t*)&ca_data[0], ca_data.size()))
throw tls_error();
if (!policyVerifyPeersNotSet->set_ca_data((const uint8_t*)&ca_data[0], ca_data.size()))
if (!policyVerifyPeersNotSet.get()->set_ca_data((const uint8_t*)&ca_data[0], ca_data.size()))
throw tls_error();
ca_set = true;
}
void TLSOptions::set_cert_data( std::string const& cert_data ) {
if (!policyVerifyPeersSet || !policyVerifyPeersNotSet)
if (!policyVerifyPeersSet.get() || !policyVerifyPeersNotSet.get())
init_plugin();
TraceEvent("TLSConnectionSettingCertData").detail("CertDataSize", cert_data.size());
if ( !policyVerifyPeersSet->set_cert_data( (const uint8_t*)&cert_data[0], cert_data.size() ) )
policyInfo.cert_contents = Standalone<StringRef>(cert_data);
if ( !policyVerifyPeersSet.get()->set_cert_data( (const uint8_t*)&cert_data[0], cert_data.size() ) )
throw tls_error();
if (!policyVerifyPeersNotSet->set_cert_data((const uint8_t*)&cert_data[0], cert_data.size()))
if (!policyVerifyPeersNotSet.get()->set_cert_data((const uint8_t*)&cert_data[0], cert_data.size()))
throw tls_error();
certs_set = true;
@ -246,12 +254,13 @@ void TLSOptions::set_cert_data( std::string const& cert_data ) {
void TLSOptions::set_key_password(std::string const& password) {
TraceEvent("TLSConnectionSettingPassword");
keyPassword = password;
policyInfo.keyPassword = password;
}
void TLSOptions::set_key_file( std::string const& key_file ) {
try {
TraceEvent("TLSConnectionSettingKeyFile").detail("KeyFilePath", key_file);
policyInfo.key_path = key_file;
set_key_data( readFileBytes( key_file, CERT_FILE_MAX_SIZE ) );
} catch ( Error& ) {
TraceEvent(SevError, "TLSOptionsSetKeyFileError").detail("Filename", key_file);
@ -260,20 +269,21 @@ void TLSOptions::set_key_file( std::string const& key_file ) {
}
void TLSOptions::set_key_data( std::string const& key_data ) {
if (!policyVerifyPeersSet || !policyVerifyPeersNotSet)
if (!policyVerifyPeersSet.get() || !policyVerifyPeersNotSet.get())
init_plugin();
const char *passphrase = keyPassword.empty() ? NULL : keyPassword.c_str();
const char *passphrase = policyInfo.keyPassword.empty() ? NULL : policyInfo.keyPassword.c_str();
TraceEvent("TLSConnectionSettingKeyData").detail("KeyDataSize", key_data.size());
if ( !policyVerifyPeersSet->set_key_data( (const uint8_t*)&key_data[0], key_data.size(), passphrase) )
policyInfo.key_contents = Standalone<StringRef>(key_data);
if ( !policyVerifyPeersSet.get()->set_key_data( (const uint8_t*)&key_data[0], key_data.size(), passphrase) )
throw tls_error();
if (!policyVerifyPeersNotSet->set_key_data((const uint8_t*)&key_data[0], key_data.size(), passphrase))
if (!policyVerifyPeersNotSet.get()->set_key_data((const uint8_t*)&key_data[0], key_data.size(), passphrase))
throw tls_error();
key_set = true;
}
void TLSOptions::set_verify_peers( std::vector<std::string> const& verify_peers ) {
if (!policyVerifyPeersSet)
if (!policyVerifyPeersSet.get())
init_plugin();
{
TraceEvent e("TLSConnectionSettingVerifyPeers");
@ -287,9 +297,10 @@ void TLSOptions::set_verify_peers( std::vector<std::string> const& verify_peers
verify_peers_len[i] = verify_peers[i].size();
}
if (!policyVerifyPeersSet->set_verify_peers(verify_peers.size(), verify_peers_arr.get(), verify_peers_len.get()))
if (!policyVerifyPeersSet.get()->set_verify_peers(verify_peers.size(), verify_peers_arr.get(), verify_peers_len.get()))
throw tls_error();
policyInfo.verify_peers = verify_peers;
verify_peers_set = true;
}
@ -299,43 +310,151 @@ void TLSOptions::register_network() {
new TLSNetworkConnections( Reference<TLSOptions>::addRef( this ) );
}
ACTOR static Future<ErrorOr<Standalone<StringRef>>> readEntireFile( std::string filename ) {
state Reference<IAsyncFile> file = wait(IAsyncFileSystem::filesystem()->open(filename, IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED, 0));
state int64_t filesize = wait(file->size());
state Standalone<StringRef> buf = makeString(filesize);
int rc = wait(file->read(mutateString(buf), filesize, 0));
if (rc != filesize) {
// The file was probably modified during the read; its mtime should change, so we'll be called again.
return tls_error();
}
return buf;
}
ACTOR static Future<Void> watchFileForChanges( std::string filename, AsyncVar<Standalone<StringRef>> *contents_var ) {
state std::time_t lastModTime = wait(IAsyncFileSystem::filesystem()->lastWriteTime(filename));
loop {
wait(delay(FLOW_KNOBS->TLS_CERT_REFRESH_DELAY_SECONDS));
std::time_t modtime = wait(IAsyncFileSystem::filesystem()->lastWriteTime(filename));
if (lastModTime != modtime) {
lastModTime = modtime;
ErrorOr<Standalone<StringRef>> contents = wait(readEntireFile(filename));
if (contents.present()) {
contents_var->set(contents.get());
}
}
}
}
ACTOR static Future<Void> reloadConfigurationOnChange( TLSOptions::PolicyInfo *pci, Reference<ITLSPlugin> plugin, AsyncVar<Reference<ITLSPolicy>> *realVerifyPeersPolicy, AsyncVar<Reference<ITLSPolicy>> *realNoVerifyPeersPolicy ) {
if (FLOW_KNOBS->TLS_CERT_REFRESH_DELAY_SECONDS <= 0) {
return Void();
}
loop {
// Early in bootup, the filesystem might not be initialized yet. Wait until it is.
if (IAsyncFileSystem::filesystem() != nullptr) {
break;
}
wait(delay(1.0));
}
state int mismatches = 0;
state AsyncVar<Standalone<StringRef>> ca_var;
state AsyncVar<Standalone<StringRef>> key_var;
state AsyncVar<Standalone<StringRef>> cert_var;
state std::vector<Future<Void>> lifetimes;
if (!pci->ca_path.empty()) lifetimes.push_back(watchFileForChanges(pci->ca_path, &ca_var));
if (!pci->key_path.empty()) lifetimes.push_back(watchFileForChanges(pci->key_path, &key_var));
if (!pci->cert_path.empty()) lifetimes.push_back(watchFileForChanges(pci->cert_path, &cert_var));
loop {
state Future<Void> ca_changed = ca_var.onChange();
state Future<Void> key_changed = key_var.onChange();
state Future<Void> cert_changed = cert_var.onChange();
wait( ca_changed || key_changed || cert_changed );
if (ca_changed.isReady()) {
TraceEvent(SevInfo, "TLSRefreshCAChanged").detail("path", pci->ca_path).detail("length", ca_var.get().size());
pci->ca_contents = ca_var.get();
}
if (key_changed.isReady()) {
TraceEvent(SevInfo, "TLSRefreshKeyChanged").detail("path", pci->key_path).detail("length", key_var.get().size());
pci->key_contents = key_var.get();
}
if (cert_changed.isReady()) {
TraceEvent(SevInfo, "TLSRefreshCertChanged").detail("path", pci->cert_path).detail("length", cert_var.get().size());
pci->cert_contents = cert_var.get();
}
bool rc = true;
Reference<ITLSPolicy> verifypeers = Reference<ITLSPolicy>(plugin->create_policy());
Reference<ITLSPolicy> noverifypeers = Reference<ITLSPolicy>(plugin->create_policy());
loop {
// Don't actually loop. We're just using loop/break as a `goto err`.
// This loop always ends with an unconditional break.
rc = verifypeers->set_ca_data(pci->ca_contents.begin(), pci->ca_contents.size());
if (!rc) break;
rc = verifypeers->set_key_data(pci->key_contents.begin(), pci->key_contents.size(), pci->keyPassword.c_str());
if (!rc) break;
rc = verifypeers->set_cert_data(pci->cert_contents.begin(), pci->cert_contents.size());
if (!rc) break;
{
std::unique_ptr<const uint8_t *[]> verify_peers_arr(new const uint8_t*[pci->verify_peers.size()]);
std::unique_ptr<int[]> verify_peers_len(new int[pci->verify_peers.size()]);
for (int i = 0; i < pci->verify_peers.size(); i++) {
verify_peers_arr[i] = (const uint8_t *)&pci->verify_peers[i][0];
verify_peers_len[i] = pci->verify_peers[i].size();
}
rc = verifypeers->set_verify_peers(pci->verify_peers.size(), verify_peers_arr.get(), verify_peers_len.get());
if (!rc) break;
}
rc = noverifypeers->set_ca_data(pci->ca_contents.begin(), pci->ca_contents.size());
if (!rc) break;
rc = noverifypeers->set_key_data(pci->key_contents.begin(), pci->key_contents.size(), pci->keyPassword.c_str());
if (!rc) break;
rc = noverifypeers->set_cert_data(pci->cert_contents.begin(), pci->cert_contents.size());
if (!rc) break;
break;
}
if (rc) {
TraceEvent(SevInfo, "TLSCertificateRefreshSucceeded");
realVerifyPeersPolicy->set(verifypeers);
realNoVerifyPeersPolicy->set(noverifypeers);
mismatches = 0;
} else {
// Some files didn't match up yet (they are watched and reloaded independently); they should soon, and we'll retry then.
mismatches++;
TraceEvent(SevWarn, "TLSCertificateRefreshMismatch").detail("mismatches", mismatches);
}
}
}
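Editor's sketch: the inner loop above is flow's stand-in for a goto-err block; plain C++ usually writes the same single-pass shape with do/while(false). All step names below are hypothetical.
bool stepOne(); // hypothetical configuration steps
bool stepTwo();

bool configureAll() {
	bool ok = true;
	do { // runs exactly once; any failed step breaks to the shared exit
		ok = stepOne();
		if (!ok) break;
		ok = stepTwo();
		if (!ok) break;
	} while (false);
	return ok; // common success/failure handling happens here
}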
const char *defaultCertFileName = "fdb.pem";
Reference<ITLSPolicy> TLSOptions::get_policy(PolicyType type) {
if ( !certs_set ) {
std::string certFile;
if ( !platform::getEnvironmentVar( "FDB_TLS_CERTIFICATE_FILE", certFile ) )
certFile = fileExists(defaultCertFileName) ? defaultCertFileName : joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
set_cert_file( certFile );
if ( !platform::getEnvironmentVar( "FDB_TLS_CERTIFICATE_FILE", policyInfo.cert_path ) )
policyInfo.cert_path = fileExists(defaultCertFileName) ? defaultCertFileName : joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
set_cert_file( policyInfo.cert_path );
}
if ( !key_set ) {
std::string keyFile;
if ( keyPassword.empty() )
platform::getEnvironmentVar( "FDB_TLS_PASSWORD", keyPassword );
if ( !platform::getEnvironmentVar( "FDB_TLS_KEY_FILE", keyFile ) )
keyFile = fileExists(defaultCertFileName) ? defaultCertFileName : joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
set_key_file( keyFile );
if ( policyInfo.keyPassword.empty() )
platform::getEnvironmentVar( "FDB_TLS_PASSWORD", policyInfo.keyPassword );
if ( !platform::getEnvironmentVar( "FDB_TLS_KEY_FILE", policyInfo.key_path ) )
policyInfo.key_path = fileExists(defaultCertFileName) ? defaultCertFileName : joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
set_key_file( policyInfo.key_path );
}
if( !verify_peers_set ) {
std::string verifyPeerString;
if (platform::getEnvironmentVar("FDB_TLS_VERIFY_PEERS", verifyPeerString))
set_verify_peers({ verifyPeerString });
std::string verify_peers;
if (platform::getEnvironmentVar("FDB_TLS_VERIFY_PEERS", verify_peers))
set_verify_peers({ verify_peers });
else
set_verify_peers({ std::string("Check.Valid=1")});
}
if (!ca_set) {
std::string caFile;
if (platform::getEnvironmentVar("FDB_TLS_CA_FILE", caFile))
set_ca_file(caFile);
if (platform::getEnvironmentVar("FDB_TLS_CA_FILE", policyInfo.ca_path))
set_ca_file(policyInfo.ca_path);
}
if (!configurationReloader.present()) {
configurationReloader = reloadConfigurationOnChange(&policyInfo, plugin, &policyVerifyPeersSet, &policyVerifyPeersNotSet);
}
Reference<ITLSPolicy> policy;
switch (type) {
case POLICY_VERIFY_PEERS:
policy = policyVerifyPeersSet;
policy = policyVerifyPeersSet.get();
break;
case POLICY_NO_VERIFY_PEERS:
policy = policyVerifyPeersNotSet;
policy = policyVerifyPeersNotSet.get();
break;
default:
ASSERT_ABORT(0);
@ -354,15 +473,15 @@ void TLSOptions::init_plugin() {
throw tls_error();
}
policyVerifyPeersSet = Reference<ITLSPolicy>( plugin->create_policy() );
if ( !policyVerifyPeersSet) {
policyVerifyPeersSet = AsyncVar<Reference<ITLSPolicy>>(Reference<ITLSPolicy>(plugin->create_policy()));
if ( !policyVerifyPeersSet.get()) {
// Hopefully create_policy logged something with the log func
TraceEvent(SevError, "TLSConnectionCreatePolicyVerifyPeersSetError");
throw tls_error();
}
policyVerifyPeersNotSet = Reference<ITLSPolicy>(plugin->create_policy());
if (!policyVerifyPeersNotSet) {
policyVerifyPeersNotSet = AsyncVar<Reference<ITLSPolicy>>(Reference<ITLSPolicy>(plugin->create_policy()));
if (!policyVerifyPeersNotSet.get()) {
// Hopefully create_policy logged something with the log func
TraceEvent(SevError, "TLSConnectionCreatePolicyVerifyPeersNotSetError");
throw tls_error();
@ -370,5 +489,5 @@ void TLSOptions::init_plugin() {
}
bool TLSOptions::enabled() {
return !!policyVerifyPeersSet && !!policyVerifyPeersNotSet;
return policyVerifyPeersSet.get().isValid() && policyVerifyPeersNotSet.get().isValid();
}
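Editor's summary: get_policy() now records each configured path in policyInfo so the reloader can re-read it; settings never set explicitly fall back to the FDB_TLS_CERTIFICATE_FILE, FDB_TLS_KEY_FILE, FDB_TLS_PASSWORD, FDB_TLS_VERIFY_PEERS, and FDB_TLS_CA_FILE environment variables, then to fdb.pem in the default configuration path, with verify-peers defaulting to Check.Valid=1.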

View File

@ -65,20 +65,6 @@ struct TLSConnection : IConnection, ReferenceCounted<TLSConnection> {
virtual UID getDebugID() { return uid; }
};
struct TLSListener : IListener, ReferenceCounted<TLSListener> {
Reference<IListener> listener;
Reference<ITLSPolicy> policy;
TLSListener( Reference<ITLSPolicy> policy, Reference<IListener> listener ) : policy(policy), listener(listener) {}
virtual void addref() { ReferenceCounted<TLSListener>::addref(); }
virtual void delref() { ReferenceCounted<TLSListener>::delref(); }
virtual Future<Reference<IConnection>> accept();
virtual NetworkAddress getListenAddress() { return listener->getListenAddress(); }
};
struct TLSOptions : ReferenceCounted<TLSOptions> {
enum { OPT_TLS = 100000, OPT_TLS_PLUGIN, OPT_TLS_CERTIFICATES, OPT_TLS_KEY, OPT_TLS_VERIFY_PEERS, OPT_TLS_CA_FILE, OPT_TLS_PASSWORD };
enum PolicyType { POLICY_VERIFY_PEERS = 1, POLICY_NO_VERIFY_PEERS };
@ -103,14 +89,41 @@ struct TLSOptions : ReferenceCounted<TLSOptions> {
Reference<ITLSPolicy> get_policy(PolicyType type);
bool enabled();
struct PolicyInfo {
std::string ca_path;
Standalone<StringRef> ca_contents;
std::string key_path;
std::string keyPassword;
Standalone<StringRef> key_contents;
std::string cert_path;
Standalone<StringRef> cert_contents;
std::vector<std::string> verify_peers;
};
private:
void init_plugin( );
void init_plugin();
Reference<ITLSPlugin> plugin;
Reference<ITLSPolicy> policyVerifyPeersSet;
Reference<ITLSPolicy> policyVerifyPeersNotSet;
PolicyInfo policyInfo;
AsyncVar<Reference<ITLSPolicy>> policyVerifyPeersSet;
AsyncVar<Reference<ITLSPolicy>> policyVerifyPeersNotSet;
Optional<Future<Void>> configurationReloader;
bool certs_set, key_set, verify_peers_set, ca_set;
std::string keyPassword;
};
struct TLSListener : IListener, ReferenceCounted<TLSListener> {
Reference<IListener> listener;
Reference<TLSOptions> options;
TLSListener( Reference<TLSOptions> options, Reference<IListener> listener ) : options(options), listener(listener) {}
virtual void addref() { ReferenceCounted<TLSListener>::addref(); }
virtual void delref() { ReferenceCounted<TLSListener>::delref(); }
virtual Future<Reference<IConnection>> accept();
virtual NetworkAddress getListenAddress() { return listener->getListenAddress(); }
};
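Editor's note: TLSListener now carries a Reference<TLSOptions> instead of a fixed ITLSPolicy, and its accept() (earlier in this diff) fetches the current POLICY_VERIFY_PEERS policy per connection, so refreshed certificates apply to newly accepted connections without recreating the listener. Moving the struct below TLSOptions satisfies the new dependency.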
struct TLSNetworkConnections : INetworkConnections {

View File

@ -347,7 +347,7 @@ class throwF2(throwF):
class throwF3(throwF):
def __str__(self):
return indent(self.cx) + "Void _ = wait( error ); // throw operation_failed()\n"
return indent(self.cx) + "wait( error ); // throw operation_failed()\n"
def unreachable(self):
return False # The actor compiler doesn't know that 'error' always contains an error

View File

@ -27,9 +27,9 @@
#elif !defined(FLOW_BATCHER_ACTOR_H)
#define FLOW_BATCHER_ACTOR_H
#include "flow/actorcompiler.h"
#include "flow/flow.h"
#include "flow/Stats.h"
#include "flow/actorcompiler.h" // This must be the last #include.
template <class X>
void logOnReceive(X x) { }
@ -49,7 +49,7 @@ bool firstInBatch(CommitTransactionRequest x) {
ACTOR template <class X>
Future<Void> batcher(PromiseStream<std::pair<std::vector<X>, int> > out, FutureStream<X> in, double avgMinDelay, double* avgMaxDelay, double emptyBatchTimeout, int maxCount, int desiredBytes, int maxBytes, Optional<PromiseStream<Void>> batchStartedStream, int64_t *commitBatchesMemBytesCount, int64_t commitBatchesMemBytesLimit, int taskID = TaskDefaultDelay, Counter* counter = 0)
{
Void _ = wait( delayJittered(*avgMaxDelay, taskID) ); // smooth out
wait( delayJittered(*avgMaxDelay, taskID) ); // smooth out
// This is set up to deliver even zero-size batches if emptyBatchTimeout elapses, because that's what master proxy wants. The source control history
// contains a version that does not.
@ -72,7 +72,7 @@ Future<Void> batcher(PromiseStream<std::pair<std::vector<X>, int> > out, FutureS
// Drop requests if memory is under severe pressure
if (*commitBatchesMemBytesCount + bytes > commitBatchesMemBytesLimit) {
x.reply.sendError(proxy_memory_limit_exceeded());
TraceEvent(SevWarnAlways, "ProxyCommitBatchMemoryThresholdExceeded").detail("CommitBatchesMemBytesCount", *commitBatchesMemBytesCount).detail("CommitBatchesMemLimit", commitBatchesMemBytesLimit).suppressFor(60, true);
TraceEvent(SevWarnAlways, "ProxyCommitBatchMemoryThresholdExceeded").suppressFor(60).detail("CommitBatchesMemBytesCount", *commitBatchesMemBytesCount).detail("CommitBatchesMemLimit", commitBatchesMemBytesLimit);
continue;
}
@ -103,7 +103,7 @@ Future<Void> batcher(PromiseStream<std::pair<std::vector<X>, int> > out, FutureS
batchBytes += bytes;
*commitBatchesMemBytesCount += bytes;
}
when ( Void _ = wait( timeout ) ) {}
when ( wait( timeout ) ) {}
}
}
out.send({std::move(batch), batchBytes});
@ -111,4 +111,6 @@ Future<Void> batcher(PromiseStream<std::pair<std::vector<X>, int> > out, FutureS
}
}
#include "flow/unactorcompiler.h"
#endif
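Editor's sketch of the collect-until-full-or-timeout shape that batcher() implements above, stripped of its memory-pressure and metrics logic (assumes the flow actor compiler; all names are hypothetical):
ACTOR template <class X>
Future<Void> miniBatcher( FutureStream<X> in, PromiseStream<std::vector<X>> out, int maxCount, double maxDelay ) {
	loop {
		state std::vector<X> batch = std::vector<X>();
		state Future<Void> timeout = delay(maxDelay);
		loop choose {
			when ( X x = waitNext(in) ) {
				batch.push_back(x);
				if ((int)batch.size() >= maxCount) break; // batch is full
			}
			when ( wait(timeout) ) { break; } // deliver whatever we have, even if empty
		}
		out.send(std::move(batch));
	}
}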

View File

@ -18,15 +18,15 @@
* limitations under the License.
*/
#include <iostream>
#include <algorithm>
#include "flow/FastRef.h"
#undef ERROR
#include "flow/actorcompiler.h"
#include "simulator.h"
#include "ActorFuzz.h"
#include "flow/DeterministicRandom.h"
#include "flow/ThreadHelper.actor.h"
#include <iostream>
#include <algorithm>
#include "flow/actorcompiler.h" // This must be the last #include.
using std::cout;
using std::endl;
@ -378,7 +378,7 @@ void fastAllocTest() {
}
return true;
}));
Void _ = waitForAll( results ).getBlocking();
waitForAll( results ).getBlocking();
t = timer()-t;
cout << "Threaded Allocate/Release TestB interleaved (100): " << results.size() << " x " << (1/t) << "M/sec" << endl;
#endif
@ -463,12 +463,12 @@ Future<Void> threadSafetySender( vector<PromiseT>& v, Event &start, Event &ready
}
ACTOR void threadSafetyWaiter( Future<Void> f, int32_t* count ) {
Void _ = wait(f);
wait(f);
interlockedIncrement(count);
}
ACTOR void threadSafetyWaiter( FutureStream<Void> f, int n, int32_t* count ) {
while (n--) {
Void _ = waitNext(f);
waitNext(f);
interlockedIncrement(count);
}
}
@ -546,7 +546,7 @@ volatile int32_t cancelled = 0, returned = 0;
ACTOR Future<Void> returnCancelRacer( Future<Void> f ) {
try {
Void _ = wait(f);
wait(f);
} catch ( Error& ) {
interlockedIncrement( &cancelled );
throw;
@ -693,7 +693,7 @@ ACTOR Future<Void> actorTest4(bool b) {
if (b)
throw operation_failed();
} catch (...) {
Void _ = wait( delay(1) );
wait( delay(1) );
}
if (now() < tstart + 1)
printf("actorTest4 failed");
@ -783,13 +783,13 @@ ACTOR Future<bool> actorTest9A(Future<Void> setAfterCalling) {
if (count && count!=4) { printf("\nactorTest9 failed\n"); return false; }
loop {
loop {
Void _ = wait( setAfterCalling );
wait( setAfterCalling );
loop {
loop {
count++;
break;
}
Void _ = wait( Future<Void>(Void()) );
wait( Future<Void>(Void()) );
count++;
break;
}
@ -813,7 +813,7 @@ Future<bool> actorTest9() {
ACTOR Future<Void> actorTest10A(FutureStream<int> inputStream, Future<Void> go) {
state int i;
for(i = 0; i < 5; i++) {
Void _ = wait( go );
wait( go );
int input = waitNext( inputStream );
}
return Void();
@ -835,7 +835,7 @@ void actorTest10() {
}
ACTOR Future<Void> cancellable() {
Void _ = wait( Never() );
wait( Never() );
return Void();
}
@ -844,7 +844,7 @@ ACTOR Future<Void> simple() {
}
ACTOR Future<Void> simpleWait() {
Void _ = wait( Future<Void>(Void()) );
wait( Future<Void>(Void()) );
return Void();
}
@ -882,7 +882,7 @@ ACTOR Future<int> chain2( Future<int> x, int i ) {
ACTOR Future<Void> cancellable2() {
try {
Void _ = wait( Never() );
wait( Never() );
return Void();
} catch (Error& e) {
throw;
@ -1015,7 +1015,7 @@ void chainTest() {
ACTOR void cycle(FutureStream<Void> in, PromiseStream<Void> out, int* ptotal){
loop{
Void _ = waitNext(in);
waitNext(in);
(*ptotal)++;
out.send(_);
out.send(Void());
}
@ -1032,7 +1032,7 @@ ACTOR Future<Void> cycleTime(int nodes, int times){
state double startT = timer();
n[1].send(Void());
loop {
Void _ = waitNext(n[0].getFuture());
waitNext(n[0].getFuture());
if (!--times) break;
n[1].send(Void());
}

View File

@ -25,7 +25,7 @@
#include "flow/flow.h"
#include "FlowTransport.h" // NetworkMessageReceiver Endpoint
#include "FailureMonitor.h"
#include "networksender.actor.h"
struct FlowReceiver : private NetworkMessageReceiver {
// Common endpoint code for NetSAV<> and NetNotifiedQueue<>
@ -367,7 +367,5 @@ void load(Ar& ar, RequestStream<T>& value) {
value = RequestStream<T>(endpoint);
}
#endif
#include "genericactors.actor.g.h"
#include "genericactors.actor.h"

Some files were not shown because too many files have changed in this diff.