diff --git a/CMakeLists.txt b/CMakeLists.txt index f6e85984f1..08df8edfe0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,7 +18,7 @@ # limitations under the License. cmake_minimum_required(VERSION 3.13) project(foundationdb - VERSION 7.0.0 + VERSION 7.1.0 DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions." HOMEPAGE_URL "http://www.foundationdb.org/" LANGUAGES C CXX ASM) diff --git a/README.md b/README.md index 44b451c135..9e0ddb78a5 100755 --- a/README.md +++ b/README.md @@ -157,11 +157,11 @@ The build under MacOS will work the same way as on Linux. To get boost and ninja cmake -G Ninja ``` -To generate a installable package, you can use cpack: +To generate a installable package, ```sh ninja -cpack -G productbuild +$SRCDIR/packaging/osx/buildpkg.sh . $SRCDIR ``` ### Windows @@ -171,7 +171,7 @@ that Visual Studio is used to compile. 1. Install Visual Studio 2017 (Community Edition is tested) 1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/) -1. Download version 1.72 of [Boost](https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2) +1. Download version 1.72 of [Boost](https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2) 1. Unpack boost (you don't need to compile it) 1. Install [Mono](http://www.mono-project.com/download/stable/) 1. (Optional) Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). 
FoundationDB currently builds with Java 8 diff --git a/bindings/CMakeLists.txt b/bindings/CMakeLists.txt index e363695ac2..378ea504b1 100644 --- a/bindings/CMakeLists.txt +++ b/bindings/CMakeLists.txt @@ -1,6 +1,6 @@ -add_subdirectory(c) if(NOT OPEN_FOR_IDE) # flow bindings currently doesn't support that + add_subdirectory(c) add_subdirectory(flow) endif() add_subdirectory(python) diff --git a/bindings/bindingtester/__init__.py b/bindings/bindingtester/__init__.py index f8ad0030e2..17d06cf4fe 100644 --- a/bindings/bindingtester/__init__.py +++ b/bindings/bindingtester/__init__.py @@ -26,7 +26,7 @@ sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', import util -FDB_API_VERSION = 700 +FDB_API_VERSION = 710 LOGGING = { 'version': 1, diff --git a/bindings/bindingtester/bindingtester.py b/bindings/bindingtester/bindingtester.py index 58db70f5db..9c178a09d5 100755 --- a/bindings/bindingtester/bindingtester.py +++ b/bindings/bindingtester/bindingtester.py @@ -157,7 +157,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers api_version = min_version elif random.random() < 0.9: api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430, - 440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700] if v >= min_version and v <= max_version]) + 440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710] if v >= min_version and v <= max_version]) else: api_version = random.randint(min_version, max_version) diff --git a/bindings/bindingtester/known_testers.py b/bindings/bindingtester/known_testers.py index e1522039db..0fe5ad638f 100644 --- a/bindings/bindingtester/known_testers.py +++ b/bindings/bindingtester/known_testers.py @@ -20,7 +20,7 @@ import os -MAX_API_VERSION = 700 +MAX_API_VERSION = 710 COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple'] ALL_TYPES = COMMON_TYPES + ['versionstamp'] diff --git 
a/bindings/bindingtester/tests/scripted.py b/bindings/bindingtester/tests/scripted.py index c113ebc07f..c250b9d8af 100644 --- a/bindings/bindingtester/tests/scripted.py +++ b/bindings/bindingtester/tests/scripted.py @@ -34,7 +34,7 @@ fdb.api_version(FDB_API_VERSION) class ScriptedTest(Test): - TEST_API_VERSION = 700 + TEST_API_VERSION = 710 def __init__(self, subspace): super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION) diff --git a/bindings/c/fdb_c.cpp b/bindings/c/fdb_c.cpp index 66bb974b71..16fbddf1c9 100644 --- a/bindings/c/fdb_c.cpp +++ b/bindings/c/fdb_c.cpp @@ -19,7 +19,7 @@ */ #include -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #define FDB_INCLUDE_LEGACY_TYPES #include "fdbclient/MultiVersionTransaction.h" diff --git a/bindings/c/foundationdb/fdb_c.h b/bindings/c/foundationdb/fdb_c.h index 4ea59ac11e..81bf10d8a8 100644 --- a/bindings/c/foundationdb/fdb_c.h +++ b/bindings/c/foundationdb/fdb_c.h @@ -27,10 +27,10 @@ #endif #if !defined(FDB_API_VERSION) -#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 700) +#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 710) #elif FDB_API_VERSION < 13 #error API version no longer supported (upgrade to 13) -#elif FDB_API_VERSION > 700 +#elif FDB_API_VERSION > 710 #error Requested API version requires a newer version of this header #endif @@ -97,7 +97,7 @@ typedef struct key { const uint8_t* key; int key_length; } FDBKey; -#if FDB_API_VERSION >= 700 +#if FDB_API_VERSION >= 710 typedef struct keyvalue { const uint8_t* key; int key_length; diff --git a/bindings/c/test/fdb_c90_test.c b/bindings/c/test/fdb_c90_test.c index 1569d98250..bbfb7f6dbf 100644 --- a/bindings/c/test/fdb_c90_test.c +++ b/bindings/c/test/fdb_c90_test.c @@ -1,9 +1,9 @@ -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include int main(int argc, char* argv[]) { (void)argc; (void)argv; - 
fdb_select_api_version(700); + fdb_select_api_version(710); return 0; } diff --git a/bindings/c/test/mako/mako.c b/bindings/c/test/mako/mako.c index 5ed7ab9a50..ed24ba5a39 100644 --- a/bindings/c/test/mako/mako.c +++ b/bindings/c/test/mako/mako.c @@ -1172,6 +1172,14 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi #endif } + /* Set client Log group */ + if (strlen(args->log_group) != 0) { + err = fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group)); + if (err) { + fprintf(stderr, "ERROR: fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP): %s\n", fdb_get_error(err)); + } + } + /* enable tracing if specified */ if (args->trace) { fprintf(debugme, @@ -1345,6 +1353,7 @@ int init_args(mako_args_t* args) { args->verbose = 1; args->flatbuffers = 0; /* internal */ args->knobs[0] = '\0'; + args->log_group[0] = '\0'; args->trace = 0; args->tracepath[0] = '\0'; args->traceformat = 0; /* default to client's default (XML) */ @@ -1505,6 +1514,7 @@ void usage() { printf("%-24s %s\n", "-m, --mode=MODE", "Specify the mode (build, run, clean)"); printf("%-24s %s\n", "-z, --zipf", "Use zipfian distribution instead of uniform distribution"); printf("%-24s %s\n", " --commitget", "Commit GETs"); + printf("%-24s %s\n", " --loggroup=LOGGROUP", "Set client log group"); printf("%-24s %s\n", " --trace", "Enable tracing"); printf("%-24s %s\n", " --tracepath=PATH", "Set trace file path"); printf("%-24s %s\n", " --trace_format ", "Set trace format (Default: json)"); @@ -1546,6 +1556,7 @@ int parse_args(int argc, char* argv[], mako_args_t* args) { { "verbose", required_argument, NULL, 'v' }, { "mode", required_argument, NULL, 'm' }, { "knobs", required_argument, NULL, ARG_KNOBS }, + { "loggroup", required_argument, NULL, ARG_LOGGROUP }, { "tracepath", required_argument, NULL, ARG_TRACEPATH }, { "trace_format", required_argument, NULL, ARG_TRACEFORMAT }, { "streaming", required_argument, NULL, 
ARG_STREAMING_MODE }, @@ -1656,6 +1667,9 @@ int parse_args(int argc, char* argv[], mako_args_t* args) { case ARG_KNOBS: memcpy(args->knobs, optarg, strlen(optarg) + 1); break; + case ARG_LOGGROUP: + memcpy(args->log_group, optarg, strlen(optarg) + 1); + break; case ARG_TRACE: args->trace = 1; break; diff --git a/bindings/c/test/mako/mako.h b/bindings/c/test/mako/mako.h index c065b44c13..214e3e6fc6 100644 --- a/bindings/c/test/mako/mako.h +++ b/bindings/c/test/mako/mako.h @@ -3,7 +3,7 @@ #pragma once #ifndef FDB_API_VERSION -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #endif #include @@ -68,6 +68,7 @@ enum Arguments { ARG_VERSION, ARG_KNOBS, ARG_FLATBUFFERS, + ARG_LOGGROUP, ARG_TRACE, ARG_TRACEPATH, ARG_TRACEFORMAT, @@ -97,6 +98,7 @@ typedef struct { int ops[MAX_OP][3]; } mako_txnspec_t; +#define LOGGROUP_MAX 256 #define KNOB_MAX 256 #define TAGPREFIXLENGTH_MAX 8 @@ -122,6 +124,7 @@ typedef struct { int verbose; mako_txnspec_t txnspec; char cluster_file[PATH_MAX]; + char log_group[LOGGROUP_MAX]; int trace; char tracepath[PATH_MAX]; int traceformat; /* 0 - XML, 1 - JSON */ diff --git a/bindings/c/test/performance_test.c b/bindings/c/test/performance_test.c index d2f8655b87..f73f673bcf 100644 --- a/bindings/c/test/performance_test.c +++ b/bindings/c/test/performance_test.c @@ -641,7 +641,7 @@ void runTests(struct ResultSet* rs) { int main(int argc, char** argv) { srand(time(NULL)); struct ResultSet* rs = newResultSet(); - checkError(fdb_select_api_version(700), "select API version", rs); + checkError(fdb_select_api_version(710), "select API version", rs); printf("Running performance test at client version: %s\n", fdb_get_client_version()); valueStr = (uint8_t*)malloc((sizeof(uint8_t)) * valueSize); diff --git a/bindings/c/test/ryw_benchmark.c b/bindings/c/test/ryw_benchmark.c index 8021a1fc9d..98f92208c0 100644 --- a/bindings/c/test/ryw_benchmark.c +++ b/bindings/c/test/ryw_benchmark.c @@ -285,7 +285,7 @@ void runTests(struct ResultSet* rs) { int main(int 
argc, char** argv) { srand(time(NULL)); struct ResultSet* rs = newResultSet(); - checkError(fdb_select_api_version(700), "select API version", rs); + checkError(fdb_select_api_version(710), "select API version", rs); printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version()); keys = generateKeys(numKeys, keySize); diff --git a/bindings/c/test/test.h b/bindings/c/test/test.h index 1e0622dd3a..0b79e232c6 100644 --- a/bindings/c/test/test.h +++ b/bindings/c/test/test.h @@ -29,7 +29,7 @@ #include #ifndef FDB_API_VERSION -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #endif #include diff --git a/bindings/c/test/txn_size_test.c b/bindings/c/test/txn_size_test.c index ca0261edf2..f1c90cd720 100644 --- a/bindings/c/test/txn_size_test.c +++ b/bindings/c/test/txn_size_test.c @@ -97,7 +97,7 @@ void runTests(struct ResultSet* rs) { int main(int argc, char** argv) { srand(time(NULL)); struct ResultSet* rs = newResultSet(); - checkError(fdb_select_api_version(700), "select API version", rs); + checkError(fdb_select_api_version(710), "select API version", rs); printf("Running performance test at client version: %s\n", fdb_get_client_version()); keys = generateKeys(numKeys, KEY_SIZE); diff --git a/bindings/c/test/unit/fdb_api.hpp b/bindings/c/test/unit/fdb_api.hpp index fc4b3e8e6b..17f25d55ee 100644 --- a/bindings/c/test/unit/fdb_api.hpp +++ b/bindings/c/test/unit/fdb_api.hpp @@ -39,7 +39,7 @@ #pragma once -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include #include diff --git a/bindings/c/test/unit/setup_tests.cpp b/bindings/c/test/unit/setup_tests.cpp index a5109b68f0..602af99845 100644 --- a/bindings/c/test/unit/setup_tests.cpp +++ b/bindings/c/test/unit/setup_tests.cpp @@ -20,7 +20,7 @@ // Unit tests for API setup, network initialization functions from the FDB C API. 
-#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include #include #include @@ -42,13 +42,13 @@ TEST_CASE("setup") { CHECK(err); // Select current API version - fdb_check(fdb_select_api_version(700)); + fdb_check(fdb_select_api_version(710)); // Error to call again after a successful return - err = fdb_select_api_version(700); + err = fdb_select_api_version(710); CHECK(err); - CHECK(fdb_get_max_api_version() >= 700); + CHECK(fdb_get_max_api_version() >= 710); fdb_check(fdb_setup_network()); // Calling a second time should fail diff --git a/bindings/c/test/unit/unit_tests.cpp b/bindings/c/test/unit/unit_tests.cpp index 54f763fb5c..360284e55d 100644 --- a/bindings/c/test/unit/unit_tests.cpp +++ b/bindings/c/test/unit/unit_tests.cpp @@ -20,7 +20,7 @@ // Unit tests for the FoundationDB C API. -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include #include #include @@ -2151,7 +2151,7 @@ int main(int argc, char** argv) { << "Usage: fdb_c_unit_tests /path/to/cluster_file key_prefix [externalClient]" << std::endl; return 1; } - fdb_check(fdb_select_api_version(700)); + fdb_check(fdb_select_api_version(710)); if (argc == 4) { std::string externalClientLibrary = argv[3]; fdb_check(fdb_network_set_option( diff --git a/bindings/c/test/workloads/SimpleWorkload.cpp b/bindings/c/test/workloads/SimpleWorkload.cpp index 6d1adbefdf..2be433b9c1 100644 --- a/bindings/c/test/workloads/SimpleWorkload.cpp +++ b/bindings/c/test/workloads/SimpleWorkload.cpp @@ -18,7 +18,7 @@ * limitations under the License. 
*/ -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include "foundationdb/fdb_c.h" #undef DLLEXPORT #include "workloads.h" @@ -266,7 +266,7 @@ struct SimpleWorkload : FDBWorkload { insertsPerTx = context->getOption("insertsPerTx", 100ul); opsPerTx = context->getOption("opsPerTx", 100ul); runFor = context->getOption("runFor", 10.0); - auto err = fdb_select_api_version(700); + auto err = fdb_select_api_version(710); if (err) { context->trace( FDBSeverity::Info, "SelectAPIVersionFailed", { { "Error", std::string(fdb_get_error(err)) } }); diff --git a/bindings/flow/fdb_flow.actor.cpp b/bindings/flow/fdb_flow.actor.cpp index fc753a6fbe..90e1a68621 100644 --- a/bindings/flow/fdb_flow.actor.cpp +++ b/bindings/flow/fdb_flow.actor.cpp @@ -37,7 +37,7 @@ THREAD_FUNC networkThread(void* fdb) { } ACTOR Future _test() { - API* fdb = FDB::API::selectAPIVersion(700); + API* fdb = FDB::API::selectAPIVersion(710); auto db = fdb->createDatabase(); state Reference tr = db->createTransaction(); @@ -81,7 +81,7 @@ ACTOR Future _test() { } void fdb_flow_test() { - API* fdb = FDB::API::selectAPIVersion(700); + API* fdb = FDB::API::selectAPIVersion(710); fdb->setupNetwork(); startThread(networkThread, fdb); diff --git a/bindings/flow/fdb_flow.h b/bindings/flow/fdb_flow.h index 28eab34e3c..f1b87c16ba 100644 --- a/bindings/flow/fdb_flow.h +++ b/bindings/flow/fdb_flow.h @@ -23,7 +23,7 @@ #include -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include #undef DLLEXPORT diff --git a/bindings/flow/tester/Tester.actor.cpp b/bindings/flow/tester/Tester.actor.cpp index 8a5f5adc26..958ff1a0be 100644 --- a/bindings/flow/tester/Tester.actor.cpp +++ b/bindings/flow/tester/Tester.actor.cpp @@ -1863,7 +1863,7 @@ ACTOR void _test_versionstamp() { try { g_network = newNet2(TLSConfig()); - API* fdb = FDB::API::selectAPIVersion(700); + API* fdb = FDB::API::selectAPIVersion(710); fdb->setupNetwork(); startThread(networkThread, fdb); diff --git a/bindings/go/README.md 
b/bindings/go/README.md index 8619e1692a..87bf502d36 100644 --- a/bindings/go/README.md +++ b/bindings/go/README.md @@ -9,7 +9,7 @@ This package requires: - [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only) - FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c)) -Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-700. +Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-710. To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater): diff --git a/bindings/go/src/fdb/cluster.go b/bindings/go/src/fdb/cluster.go index 5ab17b5273..b5556d93fd 100644 --- a/bindings/go/src/fdb/cluster.go +++ b/bindings/go/src/fdb/cluster.go @@ -22,7 +22,7 @@ package fdb -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include import "C" diff --git a/bindings/go/src/fdb/database.go b/bindings/go/src/fdb/database.go index 60f3f03d06..0e18ab908c 100644 --- a/bindings/go/src/fdb/database.go +++ b/bindings/go/src/fdb/database.go @@ -22,7 +22,7 @@ package fdb -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include import "C" diff --git a/bindings/go/src/fdb/doc.go b/bindings/go/src/fdb/doc.go index e1759701ff..2ecf99f200 100644 --- a/bindings/go/src/fdb/doc.go +++ b/bindings/go/src/fdb/doc.go @@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below: func main() { // Different API versions may expose different runtime behaviors. 
- fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) // Open the default database from the system cluster db := fdb.MustOpenDefault() diff --git a/bindings/go/src/fdb/errors.go b/bindings/go/src/fdb/errors.go index 9c9f75b566..9ce11ca150 100644 --- a/bindings/go/src/fdb/errors.go +++ b/bindings/go/src/fdb/errors.go @@ -22,7 +22,7 @@ package fdb -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include import "C" diff --git a/bindings/go/src/fdb/fdb.go b/bindings/go/src/fdb/fdb.go index bc05a05dba..662951be82 100644 --- a/bindings/go/src/fdb/fdb.go +++ b/bindings/go/src/fdb/fdb.go @@ -22,7 +22,7 @@ package fdb -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include // #include import "C" @@ -108,7 +108,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error { // library, an error will be returned. APIVersion must be called prior to any // other functions in the fdb package. // -// Currently, this package supports API versions 200 through 700. +// Currently, this package supports API versions 200 through 710. // // Warning: When using the multi-version client API, setting an API version that // is not supported by a particular client library will prevent that client from @@ -116,7 +116,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error { // the API version of your application after upgrading your client until the // cluster has also been upgraded. 
func APIVersion(version int) error { - headerVersion := 700 + headerVersion := 710 networkMutex.Lock() defer networkMutex.Unlock() @@ -128,7 +128,7 @@ func APIVersion(version int) error { return errAPIVersionAlreadySet } - if version < 200 || version > 700 { + if version < 200 || version > 710 { return errAPIVersionNotSupported } diff --git a/bindings/go/src/fdb/fdb_test.go b/bindings/go/src/fdb/fdb_test.go index e455dba473..d55a3a7d63 100644 --- a/bindings/go/src/fdb/fdb_test.go +++ b/bindings/go/src/fdb/fdb_test.go @@ -32,7 +32,7 @@ import ( func ExampleOpenDefault() { var e error - e = fdb.APIVersion(700) + e = fdb.APIVersion(710) if e != nil { fmt.Printf("Unable to set API version: %v\n", e) return @@ -52,7 +52,7 @@ func ExampleOpenDefault() { } func TestVersionstamp(t *testing.T) { - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) db := fdb.MustOpenDefault() setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) { @@ -98,7 +98,7 @@ func TestVersionstamp(t *testing.T) { } func ExampleTransactor() { - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) db := fdb.MustOpenDefault() setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error { @@ -149,7 +149,7 @@ func ExampleTransactor() { } func ExampleReadTransactor() { - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) db := fdb.MustOpenDefault() getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) { @@ -202,7 +202,7 @@ func ExampleReadTransactor() { } func ExamplePrefixRange() { - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) db := fdb.MustOpenDefault() tr, e := db.CreateTransaction() @@ -241,7 +241,7 @@ func ExamplePrefixRange() { } func ExampleRangeIterator() { - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) db := fdb.MustOpenDefault() tr, e := db.CreateTransaction() diff --git a/bindings/go/src/fdb/futures.go b/bindings/go/src/fdb/futures.go index e51d5eaa8d..35115f8594 100644 --- a/bindings/go/src/fdb/futures.go +++ b/bindings/go/src/fdb/futures.go @@ -23,7 +23,7 @@ 
package fdb // #cgo LDFLAGS: -lfdb_c -lm -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include // #include // diff --git a/bindings/go/src/fdb/range.go b/bindings/go/src/fdb/range.go index 584f23cb2b..32155eae45 100644 --- a/bindings/go/src/fdb/range.go +++ b/bindings/go/src/fdb/range.go @@ -22,7 +22,7 @@ package fdb -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include import "C" diff --git a/bindings/go/src/fdb/transaction.go b/bindings/go/src/fdb/transaction.go index 9c64b06ac7..98bfa86c08 100644 --- a/bindings/go/src/fdb/transaction.go +++ b/bindings/go/src/fdb/transaction.go @@ -22,7 +22,7 @@ package fdb -// #define FDB_API_VERSION 700 +// #define FDB_API_VERSION 710 // #include import "C" diff --git a/bindings/java/CMakeLists.txt b/bindings/java/CMakeLists.txt index 2da8639b8d..09012cdf97 100644 --- a/bindings/java/CMakeLists.txt +++ b/bindings/java/CMakeLists.txt @@ -141,8 +141,6 @@ endif() target_include_directories(fdb_java PRIVATE ${JNI_INCLUDE_DIRS}) # libfdb_java.so is loaded by fdb-java.jar and doesn't need to depened on jvm shared libraries. 
target_link_libraries(fdb_java PRIVATE fdb_c) -set_target_properties(fdb_java PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib/${SYSTEM_NAME}/amd64/) if(APPLE) set_target_properties(fdb_java PROPERTIES SUFFIX ".jnilib") endif() @@ -217,7 +215,11 @@ if(NOT OPEN_FOR_IDE) elseif(APPLE) set(lib_destination "osx/x86_64") else() - set(lib_destination "linux/amd64") + if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + set(lib_destination "linux/aarch64") + else() + set(lib_destination "linux/amd64") + endif() endif() set(lib_destination "${unpack_dir}/lib/${lib_destination}") set(jni_package "${CMAKE_BINARY_DIR}/packages/lib") diff --git a/bindings/java/JavaWorkload.cpp b/bindings/java/JavaWorkload.cpp index 7eaf9527b6..b2506965eb 100644 --- a/bindings/java/JavaWorkload.cpp +++ b/bindings/java/JavaWorkload.cpp @@ -19,7 +19,7 @@ */ #include -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include #include @@ -375,7 +375,7 @@ struct JVM { jmethodID selectMethod = env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;"); checkException(); - auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(700)); + auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(710)); checkException(); env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V")); checkException(); diff --git a/bindings/java/fdbJNI.cpp b/bindings/java/fdbJNI.cpp index 06acae658e..587190d3a5 100644 --- a/bindings/java/fdbJNI.cpp +++ b/bindings/java/fdbJNI.cpp @@ -21,7 +21,7 @@ #include #include -#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include diff --git a/bindings/java/src/integration/com/apple/foundationdb/DirectoryTest.java b/bindings/java/src/integration/com/apple/foundationdb/DirectoryTest.java index ddddd20ad1..5634e7d741 100644 --- a/bindings/java/src/integration/com/apple/foundationdb/DirectoryTest.java +++ 
b/bindings/java/src/integration/com/apple/foundationdb/DirectoryTest.java @@ -42,7 +42,7 @@ import org.junit.jupiter.api.extension.ExtendWith; */ @ExtendWith(RequiresDatabase.class) class DirectoryTest { - private static final FDB fdb = FDB.selectAPIVersion(700); + private static final FDB fdb = FDB.selectAPIVersion(710); @Test void testCanCreateDirectory() throws Exception { diff --git a/bindings/java/src/integration/com/apple/foundationdb/RangeQueryIntegrationTest.java b/bindings/java/src/integration/com/apple/foundationdb/RangeQueryIntegrationTest.java index e7490fd038..8c9dbc049c 100644 --- a/bindings/java/src/integration/com/apple/foundationdb/RangeQueryIntegrationTest.java +++ b/bindings/java/src/integration/com/apple/foundationdb/RangeQueryIntegrationTest.java @@ -41,7 +41,7 @@ import org.junit.jupiter.api.extension.ExtendWith; */ @ExtendWith(RequiresDatabase.class) class RangeQueryIntegrationTest { - private static final FDB fdb = FDB.selectAPIVersion(700); + private static final FDB fdb = FDB.selectAPIVersion(710); @BeforeEach @AfterEach diff --git a/bindings/java/src/integration/com/apple/foundationdb/RequiresDatabase.java b/bindings/java/src/integration/com/apple/foundationdb/RequiresDatabase.java index 803a25ab1c..69537c8a8d 100644 --- a/bindings/java/src/integration/com/apple/foundationdb/RequiresDatabase.java +++ b/bindings/java/src/integration/com/apple/foundationdb/RequiresDatabase.java @@ -80,7 +80,7 @@ public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback { * assume that if we are here, then canRunIntegrationTest() is returning true and we don't have to bother * checking it. 
*/ - try (Database db = FDB.selectAPIVersion(700).open()) { + try (Database db = FDB.selectAPIVersion(710).open()) { db.run(tr -> { CompletableFuture future = tr.get("test".getBytes()); diff --git a/bindings/java/src/junit/com/apple/foundationdb/FDBLibraryRule.java b/bindings/java/src/junit/com/apple/foundationdb/FDBLibraryRule.java index c50899fef9..455cb9c4b6 100644 --- a/bindings/java/src/junit/com/apple/foundationdb/FDBLibraryRule.java +++ b/bindings/java/src/junit/com/apple/foundationdb/FDBLibraryRule.java @@ -37,7 +37,7 @@ public class FDBLibraryRule implements BeforeAllCallback { public FDBLibraryRule(int apiVersion) { this.apiVersion = apiVersion; } - public static FDBLibraryRule current() { return new FDBLibraryRule(700); } + public static FDBLibraryRule current() { return new FDBLibraryRule(710); } public static FDBLibraryRule v63() { return new FDBLibraryRule(630); } diff --git a/bindings/java/src/main/com/apple/foundationdb/FDB.java b/bindings/java/src/main/com/apple/foundationdb/FDB.java index 031a1e2472..1a54e108d5 100644 --- a/bindings/java/src/main/com/apple/foundationdb/FDB.java +++ b/bindings/java/src/main/com/apple/foundationdb/FDB.java @@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger; * This call is required before using any other part of the API. The call allows * an error to be thrown at this point to prevent client code from accessing a later library * with incorrect assumptions from the current version. The API version documented here is version - * {@code 700}.

+ * {@code 710}.

* FoundationDB encapsulates multiple versions of its interface by requiring * the client to explicitly specify the version of the API it uses. The purpose * of this design is to allow you to upgrade the server, client libraries, or @@ -183,8 +183,8 @@ public class FDB { } if(version < 510) throw new IllegalArgumentException("API version not supported (minimum 510)"); - if(version > 700) - throw new IllegalArgumentException("API version not supported (maximum 700)"); + if(version > 710) + throw new IllegalArgumentException("API version not supported (maximum 710)"); Select_API_version(version); singleton = new FDB(version); diff --git a/bindings/java/src/main/com/apple/foundationdb/JNIUtil.java b/bindings/java/src/main/com/apple/foundationdb/JNIUtil.java index 8aa3d9f138..99c2f8a322 100644 --- a/bindings/java/src/main/com/apple/foundationdb/JNIUtil.java +++ b/bindings/java/src/main/com/apple/foundationdb/JNIUtil.java @@ -36,11 +36,7 @@ class JNIUtil { private static final String TEMPFILE_PREFIX = "fdbjni"; private static final String TEMPFILE_SUFFIX = ".library"; - private enum OS { - WIN32("windows", "amd64", false), - LINUX("linux", "amd64", true), - OSX("osx", "x86_64", true); - + private static class OS { private final String name; private final String arch; private final boolean canDeleteEager; @@ -171,13 +167,19 @@ class JNIUtil { private static OS getRunningOS() { String osname = System.getProperty("os.name").toLowerCase(); - if(osname.startsWith("windows")) - return OS.WIN32; - if(osname.startsWith("linux")) - return OS.LINUX; - if(osname.startsWith("mac") || osname.startsWith("darwin")) - return OS.OSX; - throw new IllegalStateException("Unknown or unsupported OS: " + osname); + String arch = System.getProperty("os.arch"); + if (!arch.equals("amd64") && !arch.equals("x86_64") && !arch.equals("aarch64")) { + throw new IllegalStateException("Unknown or unsupported arch: " + arch); + } + if (osname.startsWith("windows")) { + return new OS("windows", arch, /* 
canDeleteEager */ false); + } else if (osname.startsWith("linux")) { + return new OS("linux", arch, /* canDeleteEager */ true); + } else if (osname.startsWith("mac") || osname.startsWith("darwin")) { + return new OS("osx", arch, /* canDeleteEager */ true); + } else { + throw new IllegalStateException("Unknown or unsupported OS: " + osname); + } } private JNIUtil() {} diff --git a/bindings/java/src/main/overview.html.in b/bindings/java/src/main/overview.html.in index adaedd1a03..fe20448dfb 100644 --- a/bindings/java/src/main/overview.html.in +++ b/bindings/java/src/main/overview.html.in @@ -13,7 +13,7 @@ and then added to your classpath.

Getting started

To start using FoundationDB from Java, create an instance of the {@link com.apple.foundationdb.FDB FoundationDB API interface} with the version of the -API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 700}). +API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 710}). With this API object you can then open {@link com.apple.foundationdb.Cluster Cluster}s and {@link com.apple.foundationdb.Database Database}s and start using {@link com.apple.foundationdb.Transaction Transaction}s. @@ -29,7 +29,7 @@ import com.apple.foundationdb.tuple.Tuple; public class Example { public static void main(String[] args) { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { // Run an operation on the database diff --git a/bindings/java/src/test/com/apple/foundationdb/test/AbstractTester.java b/bindings/java/src/test/com/apple/foundationdb/test/AbstractTester.java index e27e80b082..8cb1230c2f 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/AbstractTester.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/AbstractTester.java @@ -27,7 +27,7 @@ import com.apple.foundationdb.Database; import com.apple.foundationdb.FDB; public abstract class AbstractTester { - public static final int API_VERSION = 700; + public static final int API_VERSION = 710; protected static final int NUM_RUNS = 25; protected static final Charset ASCII = Charset.forName("ASCII"); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/BlockingBenchmark.java b/bindings/java/src/test/com/apple/foundationdb/test/BlockingBenchmark.java index 68f7d74a95..d9c8c20d23 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/BlockingBenchmark.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/BlockingBenchmark.java @@ -33,7 +33,7 @@ public class BlockingBenchmark { private static final int 
PARALLEL = 100; public static void main(String[] args) throws InterruptedException { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); // The cluster file DOES NOT need to be valid, although it must exist. // This is because the database is never really contacted in this test. diff --git a/bindings/java/src/test/com/apple/foundationdb/test/ConcurrentGetSetGet.java b/bindings/java/src/test/com/apple/foundationdb/test/ConcurrentGetSetGet.java index bddfd6f57d..046a39f66d 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/ConcurrentGetSetGet.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/ConcurrentGetSetGet.java @@ -48,7 +48,7 @@ public class ConcurrentGetSetGet { } public static void main(String[] args) { - try(Database database = FDB.selectAPIVersion(700).open()) { + try(Database database = FDB.selectAPIVersion(710).open()) { new ConcurrentGetSetGet().apply(database); } } diff --git a/bindings/java/src/test/com/apple/foundationdb/test/Example.java b/bindings/java/src/test/com/apple/foundationdb/test/Example.java index 44e9087b3e..80c35b5ca2 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/Example.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/Example.java @@ -26,7 +26,7 @@ import com.apple.foundationdb.tuple.Tuple; public class Example { public static void main(String[] args) { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { // Run an operation on the database diff --git a/bindings/java/src/test/com/apple/foundationdb/test/IterableTest.java b/bindings/java/src/test/com/apple/foundationdb/test/IterableTest.java index ce1f623f4c..a9a7a37b66 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/IterableTest.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/IterableTest.java @@ -31,7 +31,7 @@ public class IterableTest { public static void main(String[] args) throws InterruptedException { final int reps 
= 1000; try { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { runTests(reps, db); } diff --git a/bindings/java/src/test/com/apple/foundationdb/test/LocalityTests.java b/bindings/java/src/test/com/apple/foundationdb/test/LocalityTests.java index d049ac83f7..a14b466514 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/LocalityTests.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/LocalityTests.java @@ -34,7 +34,7 @@ import com.apple.foundationdb.tuple.ByteArrayUtil; public class LocalityTests { public static void main(String[] args) { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database database = fdb.open(args[0])) { try(Transaction tr = database.createTransaction()) { String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join(); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/ParallelRandomScan.java b/bindings/java/src/test/com/apple/foundationdb/test/ParallelRandomScan.java index 624566964a..a218a6460e 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/ParallelRandomScan.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/ParallelRandomScan.java @@ -43,7 +43,7 @@ public class ParallelRandomScan { private static final int PARALLELISM_STEP = 5; public static void main(String[] args) throws InterruptedException { - FDB api = FDB.selectAPIVersion(700); + FDB api = FDB.selectAPIVersion(710); try(Database database = api.open(args[0])) { for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) { runTest(database, i, ROWS, DURATION_MS); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/RangeTest.java b/bindings/java/src/test/com/apple/foundationdb/test/RangeTest.java index 4232a6d664..38eaf7b424 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/RangeTest.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/RangeTest.java @@ -34,7 
+34,7 @@ import com.apple.foundationdb.Transaction; import com.apple.foundationdb.async.AsyncIterable; public class RangeTest { - private static final int API_VERSION = 700; + private static final int API_VERSION = 710; public static void main(String[] args) { System.out.println("About to use version " + API_VERSION); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/SerialInsertion.java b/bindings/java/src/test/com/apple/foundationdb/test/SerialInsertion.java index c16599196c..90adea8ac9 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/SerialInsertion.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/SerialInsertion.java @@ -34,7 +34,7 @@ public class SerialInsertion { private static final int NODES = 1000000; public static void main(String[] args) { - FDB api = FDB.selectAPIVersion(700); + FDB api = FDB.selectAPIVersion(710); try(Database database = api.open()) { long start = System.currentTimeMillis(); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/SerialIteration.java b/bindings/java/src/test/com/apple/foundationdb/test/SerialIteration.java index db63999daa..8e4578d97f 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/SerialIteration.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/SerialIteration.java @@ -39,7 +39,7 @@ public class SerialIteration { private static final int THREAD_COUNT = 1; public static void main(String[] args) throws InterruptedException { - FDB api = FDB.selectAPIVersion(700); + FDB api = FDB.selectAPIVersion(710); try(Database database = api.open(args[0])) { for(int i = 1; i <= THREAD_COUNT; i++) { runThreadedTest(database, i); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/SerialTest.java b/bindings/java/src/test/com/apple/foundationdb/test/SerialTest.java index df084d564f..5b89379350 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/SerialTest.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/SerialTest.java @@ 
-30,7 +30,7 @@ public class SerialTest { public static void main(String[] args) throws InterruptedException { final int reps = 1000; try { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { runTests(reps, db); } diff --git a/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java b/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java index 78de1ae3db..cb58c3e72d 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java @@ -39,7 +39,7 @@ public class SnapshotTransactionTest { private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges")); public static void main(String[] args) { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { snapshotReadShouldNotConflict(db); snapshotShouldNotAddConflictRange(db); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/TupleTest.java b/bindings/java/src/test/com/apple/foundationdb/test/TupleTest.java index c3ad8313be..2145b88966 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/TupleTest.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/TupleTest.java @@ -37,7 +37,7 @@ public class TupleTest { public static void main(String[] args) throws NoSuchFieldException { final int reps = 1000; try { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { runTests(reps, db); } diff --git a/bindings/java/src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java b/bindings/java/src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java index e50bc9c031..6ed02c008b 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java +++ 
b/bindings/java/src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java @@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Versionstamp; public class VersionstampSmokeTest { public static void main(String[] args) { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database db = fdb.open()) { db.run(tr -> { tr.clear(Tuple.from("prefix").range()); diff --git a/bindings/java/src/test/com/apple/foundationdb/test/WatchTest.java b/bindings/java/src/test/com/apple/foundationdb/test/WatchTest.java index 14c0aa1d43..eb675d1518 100644 --- a/bindings/java/src/test/com/apple/foundationdb/test/WatchTest.java +++ b/bindings/java/src/test/com/apple/foundationdb/test/WatchTest.java @@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction; public class WatchTest { public static void main(String[] args) { - FDB fdb = FDB.selectAPIVersion(700); + FDB fdb = FDB.selectAPIVersion(710); try(Database database = fdb.open(args[0])) { database.options().setLocationCacheSize(42); try(Transaction tr = database.createTransaction()) { diff --git a/bindings/python/fdb/__init__.py b/bindings/python/fdb/__init__.py index c969b6c70c..0054e72808 100644 --- a/bindings/python/fdb/__init__.py +++ b/bindings/python/fdb/__init__.py @@ -52,7 +52,7 @@ def get_api_version(): def api_version(ver): - header_version = 700 + header_version = 710 if '_version' in globals(): if globals()['_version'] != ver: diff --git a/bindings/python/fdb/impl.py b/bindings/python/fdb/impl.py index e8cc2a79b8..d38582b459 100644 --- a/bindings/python/fdb/impl.py +++ b/bindings/python/fdb/impl.py @@ -253,7 +253,7 @@ def transactional(*tr_args, **tr_kwargs): @functools.wraps(func) def wrapper(*args, **kwargs): # We can't throw this from the decorator, as when a user runs - # >>> import fdb ; fdb.api_version(700) + # >>> import fdb ; fdb.api_version(710) # the code above uses @transactional before the API version is set if fdb.get_api_version() >= 630 and 
inspect.isgeneratorfunction(func): raise ValueError("Generators can not be wrapped with fdb.transactional") diff --git a/bindings/python/tests/size_limit_tests.py b/bindings/python/tests/size_limit_tests.py index 756d9422e0..fdc9cdaf54 100644 --- a/bindings/python/tests/size_limit_tests.py +++ b/bindings/python/tests/size_limit_tests.py @@ -22,7 +22,7 @@ import fdb import sys if __name__ == '__main__': - fdb.api_version(700) + fdb.api_version(710) @fdb.transactional def setValue(tr, key, value): diff --git a/bindings/ruby/lib/fdb.rb b/bindings/ruby/lib/fdb.rb index df8448ea0b..f96c25945a 100644 --- a/bindings/ruby/lib/fdb.rb +++ b/bindings/ruby/lib/fdb.rb @@ -36,7 +36,7 @@ module FDB end end def self.api_version(version) - header_version = 700 + header_version = 710 if self.is_api_version_selected?() if @@chosen_version != version raise "FDB API already loaded at version #{@@chosen_version}." diff --git a/build/cmake/Dockerfile b/build/cmake/Dockerfile index 3f9d51a29a..0452606a1f 100644 --- a/build/cmake/Dockerfile +++ b/build/cmake/Dockerfile @@ -13,7 +13,7 @@ RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.1 cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/ # install boost -RUN curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_72_0.tar.bz2 > /tmp/boost.tar.bz2 &&\ +RUN curl -L https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\ cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\ sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_72_0/boost /usr/local/include/ &&\ rm -rf boost.tar.bz2 boost_1_72_0 diff --git a/build/cmake/package_tester/fdb_c_app/app.c b/build/cmake/package_tester/fdb_c_app/app.c index f26b2513c1..6fe24068f9 100644 --- a/build/cmake/package_tester/fdb_c_app/app.c +++ b/build/cmake/package_tester/fdb_c_app/app.c @@ -1,7 +1,7 @@ 
-#define FDB_API_VERSION 700 +#define FDB_API_VERSION 710 #include int main(int argc, char* argv[]) { - fdb_select_api_version(700); + fdb_select_api_version(710); return 0; } diff --git a/build/cmake/package_tester/modules/tests.sh b/build/cmake/package_tester/modules/tests.sh index 35ff098a6f..2495e21a94 100644 --- a/build/cmake/package_tester/modules/tests.sh +++ b/build/cmake/package_tester/modules/tests.sh @@ -65,7 +65,7 @@ then python setup.py install successOr "Installing python bindings failed" popd - python -c 'import fdb; fdb.api_version(700)' + python -c 'import fdb; fdb.api_version(710)' successOr "Loading python bindings failed" # Test cmake and pkg-config integration: https://github.com/apple/foundationdb/issues/1483 diff --git a/build/docker/centos6/build/Dockerfile b/build/docker/centos6/build/Dockerfile index c007626643..0a1fbbd70a 100644 --- a/build/docker/centos6/build/Dockerfile +++ b/build/docker/centos6/build/Dockerfile @@ -22,6 +22,8 @@ RUN sed -i -e '/enabled/d' /etc/yum.repos.d/CentOS-Base.repo && \ curl \ debbuild \ devtoolset-8 \ + devtoolset-8-libasan-devel \ + devtoolset-8-libtsan-devel \ devtoolset-8-libubsan-devel \ devtoolset-8-valgrind-devel \ dos2unix \ @@ -156,7 +158,7 @@ RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocks rm -rf /tmp/* # install boost 1.67 to /opt -RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \ +RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \ echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \ sha256sum -c boost-sha-67.txt && \ tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \ @@ -165,7 +167,7 @@ RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0. 
# install boost 1.72 to /opt RUN source /opt/rh/devtoolset-8/enable && \ - curl -Ls https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \ + curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \ echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \ sha256sum -c boost-sha-72.txt && \ tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \ diff --git a/build/docker/centos7/build/Dockerfile b/build/docker/centos7/build/Dockerfile index 18773c041a..de376d2557 100644 --- a/build/docker/centos7/build/Dockerfile +++ b/build/docker/centos7/build/Dockerfile @@ -18,6 +18,8 @@ RUN rpmkeys --import mono-project.com.rpmkey.pgp && \ curl \ debbuild \ devtoolset-8 \ + devtoolset-8-libasan-devel \ + devtoolset-8-libtsan-devel \ devtoolset-8-libubsan-devel \ devtoolset-8-systemtap-sdt-devel \ docker-ce \ @@ -139,7 +141,7 @@ RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocks rm -rf /tmp/* # install boost 1.67 to /opt -RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \ +RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \ echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \ sha256sum -c boost-sha-67.txt && \ tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \ @@ -148,7 +150,7 @@ RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0. 
# install boost 1.72 to /opt RUN source /opt/rh/devtoolset-8/enable && \ - curl -Ls https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \ + curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \ echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \ sha256sum -c boost-sha-72.txt && \ tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \ diff --git a/cmake/CPackConfig.cmake b/cmake/CPackConfig.cmake index 08f90bc0c5..c67059ec65 100644 --- a/cmake/CPackConfig.cmake +++ b/cmake/CPackConfig.cmake @@ -9,24 +9,6 @@ elseif(CPACK_GENERATOR MATCHES "DEB") set(CPACK_COMPONENTS_ALL clients-deb server-deb clients-versioned server-versioned) set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) -elseif(CPACK_GENERATOR MATCHES "productbuild") - set(CPACK_PACKAGING_INSTALL_PREFIX "/") - set(CPACK_COMPONENTS_ALL clients-pm server-pm) - set(CPACK_STRIP_FILES TRUE) - set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) - set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall) - set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) -# Commenting out this readme file until it works within packaging - set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf) - set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources) -# Changing the path of this file as CMAKE_BINARY_DIR does not seem to be defined - set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt) - if(NOT FDB_RELEASE) - set(prerelease_string "-PRERELEASE") - else() - set(prerelease_string "") - endif() - set(CPACK_PACKAGE_FILE_NAME 
"FoundationDB-${PROJECT_VERSION}${prerelease_string}") elseif(CPACK_GENERATOR MATCHES "TGZ") set(CPACK_STRIP_FILES TRUE) set(CPACK_COMPONENTS_ALL clients-tgz server-tgz) diff --git a/cmake/CompileBoost.cmake b/cmake/CompileBoost.cmake index 0b1cc68502..687c266f0b 100644 --- a/cmake/CompileBoost.cmake +++ b/cmake/CompileBoost.cmake @@ -38,7 +38,7 @@ function(compile_boost) include(ExternalProject) set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install") ExternalProject_add("${MY_TARGET}Project" - URL "https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2" + URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2" URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 CONFIGURE_COMMAND ./bootstrap.sh ${BOOTSTRAP_ARGS} BUILD_COMMAND ${B2_COMMAND} link=static ${MY_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install diff --git a/cmake/FDBInstall.cmake b/cmake/FDBInstall.cmake index 263291c433..2dd4be696f 100644 --- a/cmake/FDBInstall.cmake +++ b/cmake/FDBInstall.cmake @@ -214,7 +214,7 @@ endfunction() function(fdb_install) if(NOT WIN32 AND NOT OPEN_FOR_IDE) - set(one_value_options COMPONENT DESTINATION EXPORT DESTINATION_SUFFIX) + set(one_value_options COMPONENT DESTINATION EXPORT DESTINATION_SUFFIX RENAME) set(multi_value_options TARGETS FILES PROGRAMS DIRECTORY) cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}") @@ -237,6 +237,9 @@ function(fdb_install) get_install_dest(${pkg} ${destination} install_path) string(TOLOWER "${pkg}" package) if(install_export) + if(IN_RENAME) + message(FATAL_ERROR "RENAME for EXPORT target not implemented") + endif() install( EXPORT "${IN_EXPORT}-${package}" DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}" @@ -248,11 +251,20 @@ function(fdb_install) set(export_args EXPORT "${IN_EXPORT}-${package}") endif() if(NOT ${install_path} STREQUAL "") - install( - ${args} - ${export_args} - 
DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}" - COMPONENT "${IN_COMPONENT}-${package}") + if(IN_RENAME) + install( + ${args} + ${export_args} + DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}" + COMPONENT "${IN_COMPONENT}-${package}" + RENAME ${IN_RENAME}) + else() + install( + ${args} + ${export_args} + DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}" + COMPONENT "${IN_COMPONENT}-${package}") + endif() endif() endif() endforeach() diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index a037b65df2..91d39d4125 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -46,10 +46,6 @@ function(install_symlink) TO "../${rel_path}bin/${IN_FILE_NAME}" DESTINATION "usr/lib64/${IN_LINK_NAME}" COMPONENTS "${IN_COMPONENT}-deb") - install_symlink_impl( - TO "../${rel_path}local/bin/${IN_FILE_NAME}" - DESTINATION "usr/lib64/${IN_LINK_NAME}" - COMPONENTS "${IN_COMPONENT}-pm") elseif("${IN_LINK_DIR}" MATCHES "bin") install_symlink_impl( TO "../${rel_path}bin/${IN_FILE_NAME}" @@ -61,10 +57,6 @@ function(install_symlink) COMPONENTS "${IN_COMPONENT}-el6" "${IN_COMPONENT}-el7" "${IN_COMPONENT}-deb") - install_symlink_impl( - TO "../${rel_path}/bin/${IN_FILE_NAME}" - DESTINATION "usr/local/bin/${IN_LINK_NAME}" - COMPONENTS "${IN_COMPONENT}-pm") elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor") install_symlink_impl( TO "../../${rel_path}bin/${IN_FILE_NAME}" @@ -76,10 +68,6 @@ function(install_symlink) COMPONENTS "${IN_COMPONENT}-el6" "${IN_COMPONENT}-el7" "${IN_COMPONENT}-deb") - install_symlink_impl( - TO "../../${rel_path}/bin/${IN_FILE_NAME}" - DESTINATION "usr/local/lib/foundationdb/${IN_LINK_NAME}" - COMPONENTS "${IN_COMPONENT}-pm") else() message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}") endif() @@ -103,8 +91,8 @@ function(symlink_files) endif() endfunction() -fdb_install_packages(TGZ DEB EL7 PM VERSIONED) -fdb_install_dirs(BIN SBIN LIB FDBMONITOR INCLUDE ETC LOG DATA) +fdb_install_packages(TGZ DEB EL7 VERSIONED) 
+fdb_install_dirs(BIN SBIN LIB FDBMONITOR INCLUDE ETC LOG DATA BACKUPAGENT) message(STATUS "FDB_INSTALL_DIRS -> ${FDB_INSTALL_DIRS}") install_destinations(TGZ @@ -112,6 +100,7 @@ install_destinations(TGZ SBIN sbin LIB lib FDBMONITOR sbin + BACKUPAGENT usr/lib/foundationdb INCLUDE include ETC etc/foundationdb LOG log/foundationdb @@ -122,19 +111,13 @@ install_destinations(DEB SBIN usr/sbin LIB usr/lib FDBMONITOR usr/lib/foundationdb + BACKUPAGENT usr/lib/foundationdb INCLUDE usr/include ETC etc/foundationdb LOG var/log/foundationdb DATA var/lib/foundationdb/data) copy_install_destinations(DEB EL7) install_destinations(EL7 LIB usr/lib64) -install_destinations(PM - BIN usr/local/bin - SBIN usr/local/sbin - LIB lib - FDBMONITOR usr/local/libexec - INCLUDE usr/local/include - ETC usr/local/etc/foundationdb) # This can be used for debugging in case above is behaving funky #print_install_destinations() @@ -142,7 +125,7 @@ install_destinations(PM set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") if(APPLE) - set(CPACK_GENERATOR TGZ productbuild) + set(CPACK_GENERATOR TGZ) else() set(CPACK_GENERATOR RPM DEB TGZ) endif() @@ -212,19 +195,16 @@ set(CPACK_PACKAGE_CONTACT "The FoundationDB Community") set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7) set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb) set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz) -set(CPACK_COMPONENT_SERVER-PM_DEPENDS clients-pm) set(CPACK_COMPONENT_SERVER-VERSIONED_DEPENDS clients-versioned) set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server") set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server") set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server") -set(CPACK_COMPONENT_SERVER-PM_DISPLAY_NAME "foundationdb-server") set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb-server-${PROJECT_VERSION}") set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients") set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients") 
set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients") -set(CPACK_COMPONENT_CLIENTS-PM_DISPLAY_NAME "foundationdb-clients") set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb-clients-${PROJECT_VERSION}") @@ -382,19 +362,6 @@ set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_CONTROL_EXTRA ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst ${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm) -################################################################################ -# MacOS configuration -################################################################################ - -if(APPLE) - install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh - DESTINATION "usr/local/foundationdb" - COMPONENT clients-pm) - install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist - DESTINATION "Library/LaunchDaemons" - COMPONENT server-pm) -endif() - ################################################################################ # Configuration for DEB ################################################################################ @@ -413,9 +380,6 @@ set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description") set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description") if(NOT WIN32) - install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new - DESTINATION "usr/local/etc" - COMPONENT server-pm) fdb_install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf DESTINATION etc COMPONENT server) diff --git a/contrib/TestHarness/Program.cs.cmake b/contrib/TestHarness/Program.cs.cmake index 8d666b2725..075a2758d6 100644 --- a/contrib/TestHarness/Program.cs.cmake +++ b/contrib/TestHarness/Program.cs.cmake @@ -717,7 +717,7 @@ namespace SummarizeTest delegate IEnumerable parseDelegate(System.IO.Stream stream, string file, bool keepOriginalElement = false, double startTime = -1, double endTime = Double.MaxValue, - double samplingFactor = 1.0); + double samplingFactor = 
1.0, Action nonFatalErrorMessage = null); static int Summarize(string[] traceFiles, string summaryFileName, string errorFileName, bool? killed, List outputErrors, int? exitCode, long? peakMemory, @@ -750,12 +750,14 @@ namespace SummarizeTest { try { + // Use Action to set this because IEnumerables with yield can't have an out variable + string nonFatalParseError = null; parseDelegate parse; if (traceFileName.EndsWith(".json")) parse = Magnesium.JsonParser.Parse; else parse = Magnesium.XmlParser.Parse; - foreach (var ev in parse(traceFile, traceFileName)) + foreach (var ev in parse(traceFile, traceFileName, nonFatalErrorMessage: (x) => { nonFatalParseError = x; })) { Magnesium.Severity newSeverity; if (severityMap.TryGetValue(new KeyValuePair(ev.Type, ev.Severity), out newSeverity)) @@ -876,6 +878,11 @@ namespace SummarizeTest if (ev.Type == "StderrSeverity") stderrSeverity = int.Parse(ev.Details.NewSeverity); } + if (nonFatalParseError != null) { + xout.Add(new XElement("NonFatalParseError", + new XAttribute("Severity", (int)Magnesium.Severity.SevWarnAlways), + new XAttribute("ErrorMessage", nonFatalParseError))); + } } catch (Exception e) diff --git a/contrib/TraceLogHelper/JsonParser.cs b/contrib/TraceLogHelper/JsonParser.cs index 9d7272a37f..84fbab81ab 100644 --- a/contrib/TraceLogHelper/JsonParser.cs +++ b/contrib/TraceLogHelper/JsonParser.cs @@ -1,4 +1,4 @@ -/* +/* * JsonParser.cs * * This source file is part of the FoundationDB open source project @@ -34,9 +34,10 @@ namespace Magnesium { static Random r = new Random(); + // dummy parameter nonFatalParseError to match xml public static IEnumerable Parse(System.IO.Stream stream, string file, bool keepOriginalElement = false, double startTime = -1, double endTime = Double.MaxValue, - double samplingFactor = 1.0) + double samplingFactor = 1.0, Action nonFatalErrorMessage = null) { using (var reader = new System.IO.StreamReader(stream)) { diff --git a/contrib/TraceLogHelper/XmlParser.cs 
b/contrib/TraceLogHelper/XmlParser.cs index 3728c58c3b..9ab79d920e 100644 --- a/contrib/TraceLogHelper/XmlParser.cs +++ b/contrib/TraceLogHelper/XmlParser.cs @@ -33,14 +33,29 @@ namespace Magnesium public static IEnumerable Parse(System.IO.Stream stream, string file, bool keepOriginalElement = false, double startTime = -1, double endTime = Double.MaxValue, - double samplingFactor = 1.0) + double samplingFactor = 1.0, Action nonFatalErrorMessage = null) { using (var reader = XmlReader.Create(stream)) { reader.ReadToDescendant("Trace"); reader.Read(); - foreach (var xev in StreamElements(reader)) + + // foreach (var xev in StreamElements(reader)) + // need to be able to catch and save non-fatal exceptions in StreamElements, so use explicit iterator instead of foreach + var iter = StreamElements(reader).GetEnumerator(); + while (true) { + try { + if (!iter.MoveNext()) { + break; + } + } catch (Exception e) { + if (nonFatalErrorMessage != null) { + nonFatalErrorMessage(e.Message); + } + break; + } + var xev = iter.Current; Event ev = null; try { @@ -165,28 +180,20 @@ namespace Magnesium } } + // throws exceptions if xml is invalid private static IEnumerable StreamElements(this XmlReader reader) { while (!reader.EOF) { if (reader.NodeType == XmlNodeType.Element) { - XElement node = null; - try - { - node = XElement.ReadFrom(reader) as XElement; - } - catch (Exception) { break; } + XElement node = XElement.ReadFrom(reader) as XElement; if (node != null) yield return node; } else { - try - { reader.Read(); - } - catch (Exception) { break; } } } } diff --git a/contrib/apiversioner.py b/contrib/apiversioner.py new file mode 100755 index 0000000000..9806216671 --- /dev/null +++ b/contrib/apiversioner.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python3 +# +# apiversioner.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2021 Apple Inc. 
and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import logging +import os +import re +import sys +import traceback + + +LOG_FORMAT = '%(created)f [%(levelname)s] %(message)s' + +EXCLUDED_FILES = list(map(re.compile, [ + # Output directories + r'\.git/.*', r'bin/.*', r'packages/.*', r'\.objs/.*', r'\.deps/.*', r'bindings/go/build/.*', r'documentation/sphinx/\.out/.*', + + # Generated files + r'.*\.g\.cpp$', r'.*\.g\.h$', r'(^|.*/)generated.mk$', r'.*\.g\.S$', + r'.*/MutationType\.java', r'.*/generated\.go', + + # Binary files + r'.*\.class$', r'.*\.o$', r'.*\.a$', r'.*[\.-]debug', r'.*\.so$', r'.*\.dylib$', r'.*\.dll$', r'.*\.tar[^/]*$', r'.*\.jar$', r'.*pyc$', r'bindings/flow/bin/.*', + r'.*\.pdf$', r'.*\.jp[e]*g', r'.*\.png', r'.*\.ico', + r'packaging/msi/art/.*', + + # Project configuration files + r'.*foundationdb\.VC\.db$', r'.*foundationdb\.VC\.VC\.opendb$', r'.*iml$', + + # Source files from someone else + r'(^|.*/)Hash3\..*', r'(^|.*/)sqlite.*', + r'bindings/go/godoc-resources/.*', + r'bindings/go/src/fdb/tuple/testdata/tuples.golden', + r'fdbcli/linenoise/.*', + r'fdbrpc/rapidjson/.*', r'fdbrpc/rapidxml/.*', r'fdbrpc/zlib/.*', r'fdbrpc/sha1/.*', + r'fdbrpc/xml2json.hpp$', r'fdbrpc/libcoroutine/.*', r'fdbrpc/libeio/.*', r'fdbrpc/lib64/.*', + r'fdbrpc/generated-constants.cpp$', + + # Miscellaneous + r'bindings/nodejs/node_modules/.*', r'bindings/go/godoc/.*', r'.*trace.*xml$', r'.*log$', 
r'.*\.DS_Store$', r'simfdb/\.*', r'.*~$', r'.*.swp$' +])) + +SUSPECT_PHRASES = list(map(re.compile, [ + r'#define\s+FDB_API_VERSION\s+(\d+)', + r'\.\s*selectApiVersion\s*\(\s*(\d+)\s*\)', + r'\.\s*APIVersion\s*\(\s*(\d+)\s*\)', + r'\.\s*MustAPIVersion\s*\(\s*(\d+)\s*\)', + r'header_version\s+=\s+(\d+)', + r'\.\s*apiVersion\s*\(\s*(\d+)\s*\)', + r'API_VERSION\s*=\s*(\d+)', + r'fdb_select_api_version\s*\((\d+)\)' +])) + +DIM_CODE = '\033[2m' +BOLD_CODE = '\033[1m' +RED_COLOR = '\033[91m' +GREEN_COLOR = '\033[92m' +END_COLOR = '\033[0m' + + +def positive_response(val): + return val.lower() in {'y', 'yes'} + + +# Returns: new line list + a dirty flag +def rewrite_lines(lines, version_re, new_version, suspect_only=True, print_diffs=False, ask_confirm=False, grayscale=False): + new_lines = [] + dirty = False + new_str = str(new_version) + regexes = SUSPECT_PHRASES if suspect_only else [version_re] + group_index = 1 if suspect_only else 2 + for line_no, line in enumerate(lines): + new_line = line + offset = 0 + + for regex in regexes: + for m in regex.finditer(line): + # Replace suspect code with new version. + start = m.start(group_index) + end = m.end(group_index) + new_line = new_line[:start + offset] + new_str + new_line[end + offset:] + offset += len(new_str) - (end - start) + + if (print_diffs or ask_confirm) and line != new_line: + print('Rewrite:') + print('\n'.join(map(lambda pair: ' {:4d}: {}'.format(line_no - 1 + pair[0], pair[1]), enumerate(lines[line_no - 2:line_no])))) + print((DIM_CODE if grayscale else RED_COLOR) + '-{:4d}: {}'.format(line_no + 1, line) + END_COLOR) + print((BOLD_CODE if grayscale else GREEN_COLOR) + '+{:4d}: {}'.format(line_no + 1, new_line) + END_COLOR) + print('\n'.join(map(lambda pair: ' {:4d}: {}'.format(line_no + 2 + pair[0], pair[1]), enumerate(lines[line_no + 1:line_no + 3])))) + + if ask_confirm: + text = input('Looks good (y/n)? 
') + if not positive_response(text): + print('Okay, skipping.') + new_line = line + + dirty = dirty or (new_line != line) + new_lines.append(new_line) + + return new_lines, dirty + + +def address_file(base_path, file_path, version, new_version=None, suspect_only=False, show_diffs=False, + rewrite=False, ask_confirm=True, grayscale=False, paths_only=False): + if any(map(lambda x: x.match(file_path), EXCLUDED_FILES)): + logging.debug('skipping file %s as matches excluded list', file_path) + return True + + # Look for all instances of the version number where it is not part of a larger number + version_re = re.compile('(^|[^\\d])(' + str(version) + ')([^\\d]|$)') + try: + contents = open(os.path.join(base_path, file_path), 'r').read() + lines = contents.split('\n') + new_lines = lines + dirty = False + + if suspect_only: + # Look for suspect lines (lines that attempt to set a version) + found = False + for line_no, line in enumerate(lines): + for suspect_phrase in SUSPECT_PHRASES: + for match in suspect_phrase.finditer(line): + curr_version = int(match.groups()[0]) + if (new_version is None and curr_version < version) or (new_version is not None and curr_version < new_version): + found = True + logging.info('Old version: %s:%d:%s', file_path, line_no + 1, line) + + if found and new_version is not None and (show_diffs or rewrite): + new_lines, dirty = rewrite_lines(lines, version_re, new_version, True, print_diffs=True, + ask_confirm=(rewrite and ask_confirm), grayscale=grayscale) + + else: + matching_lines = list(filter(lambda pair: version_re.search(pair[1]), enumerate(lines))) + + # Look for lines with the version + if matching_lines: + if paths_only: + logging.info('File %s matches', file_path) + else: + for line_no, line in matching_lines: + logging.info('Match: %s:%d:%s', file_path, line_no + 1, line) + if new_version is not None and (show_diffs or rewrite): + new_lines, dirty = rewrite_lines(lines, version_re, new_version, False, print_diffs=True, +
ask_confirm=(rewrite and ask_confirm), grayscale=grayscale) + else: + logging.debug('File %s does not match', file_path) + + if dirty and rewrite: + logging.info('Rewriting %s', os.path.join(base_path, file_path)) + with open(os.path.join(base_path, file_path), 'w') as fout: + fout.write('\n'.join(new_lines)) + + return True + except (OSError, UnicodeDecodeError) as e: + logging.exception('Unable to read file %s due to OSError', os.path.join(base_path, file_path)) + return False + + +def address_path(path, version, new_version=None, suspect_only=False, show_diffs=False, rewrite=False, ask_confirm=True, grayscale=False, paths_only=False): + try: + if os.path.exists(path): + if os.path.isdir(path): + status = True + for dir_path, dir_names, file_names in os.walk(path): + for file_name in file_names: + file_path = os.path.relpath(os.path.join(dir_path, file_name), path) + status = address_file(path, file_path, version, new_version, suspect_only, show_diffs, + rewrite, ask_confirm, grayscale, paths_only) and status + return status + else: + base_name, file_name = os.path.split(path) + return address_file(base_name, file_name, version, new_version, suspect_only, show_diffs, rewrite, ask_confirm, grayscale) + else: + logging.error('Path %s does not exist', path) + return False + except OSError as e: + logging.exception('Unable to find all API versions due to OSError') + return False + + +def run(arg_list): + parser = argparse.ArgumentParser(description='finds and rewrites the API version in FDB source files') + parser.add_argument('path', help='path to search for FDB source files') + parser.add_argument('version', type=int, help='current/old version to search for') + parser.add_argument('--new-version', type=int, default=None, help='new version to update to') + parser.add_argument('--suspect-only', action='store_true', default=False, help='only look for phrases trying to set the API version') + parser.add_argument('--show-diffs', action='store_true', default=False, 
help='show suggested diffs for fixing version') + parser.add_argument('--rewrite', action='store_true', default=False, help='rewrite offending files') + parser.add_argument('-y', '--skip-confirm', action='store_true', default=False, help='do not ask for confirmation before rewriting') + parser.add_argument('--grayscale', action='store_true', default=False, + help='print diffs using grayscale output instead of red and green') + parser.add_argument('--paths-only', action='store_true', default=False, help='display only the path instead of the offending lines') + args = parser.parse_args(arg_list) + return address_path(args.path, args.version, args.new_version, args.suspect_only, args.show_diffs, + args.rewrite, not args.skip_confirm, args.grayscale, args.paths_only) + + +if __name__ == '__main__': + logging.basicConfig(format=LOG_FORMAT, level=logging.INFO) + if not run(sys.argv[1:]): + exit(1) diff --git a/contrib/grv_proxy_model/grv_test.py b/contrib/grv_proxy_model/grv_test.py new file mode 100755 index 0000000000..1cd0224538 --- /dev/null +++ b/contrib/grv_proxy_model/grv_test.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 + +# +# grv_test.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import inspect +import sys + +import rate_model +import workload_model +import proxy_model +import ratekeeper_model +from priority import Priority +from plot import Plotter + +parser = argparse.ArgumentParser() +parser.add_argument('-w', '--workload', type=str, help='Name of workload to run') +parser.add_argument('-r', '--ratekeeper', type=str, help='Name of ratekeeper model') +parser.add_argument('-d', '--duration', type=int, default=240, help='Duration of simulated test, in seconds. Defaults to 240.') +parser.add_argument('-L', '--limiter', type=str, default='Original', help='Name of limiter implementation. Defaults to \'Original\'.') +parser.add_argument('-p', '--proxy', type=str, default='ProxyModel', help='Name of proxy implementation. Defaults to \'ProxyModel\'.') +parser.add_argument('--list', action='store_true', default=False, help='List options for all models.') +parser.add_argument('--no-graph', action='store_true', default=False, help='Disable graphical output.') + +args = parser.parse_args() + +def print_choices_list(context=None): + if context == 'workload' or context is None: + print('Workloads:') + for w in workload_model.predefined_workloads.keys(): + print(' %s' % w) + + if context == 'ratekeeper' or context is None: + print('\nRatekeeper models:') + for r in ratekeeper_model.predefined_ratekeeper.keys(): + print(' %s' % r) + + proxy_model_classes = [c for c in [getattr(proxy_model, a) for a in dir(proxy_model)] if inspect.isclass(c)] + + if context == 'proxy' or context is None: + print('\nProxy models:') + for p in proxy_model_classes: + if issubclass(p, proxy_model.ProxyModel): + print(' %s' % p.__name__) + + if context == 'limiter' or context is None: + print('\nProxy limiters:') + for p in proxy_model_classes: + if issubclass(p, proxy_model.Limiter) and p != proxy_model.Limiter: + name = p.__name__ + if name.endswith('Limiter'): + name = name[0:-len('Limiter')] + print(' %s' % name) + +if args.workload is None or 
args.ratekeeper is None: + print('ERROR: A workload (-w/--workload) and ratekeeper model (-r/--ratekeeper) must be specified.\n') + print_choices_list() + sys.exit(1) + +if args.list: + print_choices_list() + sys.exit(0) + +def validate_class_type(var, name, superclass): + cls = getattr(var, name, None) + return cls is not None and inspect.isclass(cls) and issubclass(cls, superclass) + +if not args.ratekeeper in ratekeeper_model.predefined_ratekeeper: + print('Invalid ratekeeper model `%s\'' % args.ratekeeper) + print_choices_list('ratekeeper') + sys.exit(1) + +if not args.workload in workload_model.predefined_workloads: + print('Invalid workload model `%s\'' % args.workload) + print_choices_list('workload') + sys.exit(1) + +if not validate_class_type(proxy_model, args.proxy, proxy_model.ProxyModel): + print('Invalid proxy model `%s\'' % args.proxy) + print_choices_list('proxy') + sys.exit(1) + +limiter_name = args.limiter +if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter): + limiter_name += 'Limiter' + if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter): + print('Invalid proxy limiter `%s\'' % args.limiter) + print_choices_list('limiter') + sys.exit(1) + +ratekeeper = ratekeeper_model.predefined_ratekeeper[args.ratekeeper] +workload = workload_model.predefined_workloads[args.workload] + +limiter = getattr(proxy_model, limiter_name) +proxy = getattr(proxy_model, args.proxy)(args.duration, ratekeeper, workload, limiter) + +proxy.run() + +for priority in workload.priorities(): + latencies = sorted([p for t in proxy.results.latencies[priority].values() for p in t]) + total_started = sum(proxy.results.started[priority].values()) + still_queued = sum([r.count for r in proxy.request_queue if r.priority == priority]) + + if len(latencies) > 0: + print('\n%s: %d requests in %d seconds (rate=%f). %d still queued.' 
% (priority, total_started, proxy.time, float(total_started)/proxy.time, still_queued)) + print(' Median latency: %f' % latencies[len(latencies)//2]) + print(' 90%% latency: %f' % latencies[int(0.9*len(latencies))]) + print(' 99%% latency: %f' % latencies[int(0.99*len(latencies))]) + print(' 99.9%% latency: %f' % latencies[int(0.999*len(latencies))]) + print(' Max latency: %f' % latencies[-1]) + +print('') + +if not args.no_graph: + plotter = Plotter(proxy.results) + plotter.display() diff --git a/contrib/grv_proxy_model/plot.py b/contrib/grv_proxy_model/plot.py new file mode 100755 index 0000000000..9334e2c844 --- /dev/null +++ b/contrib/grv_proxy_model/plot.py @@ -0,0 +1,107 @@ +# +# plot.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import matplotlib.pyplot as plt + +class Plotter: + def __init__(self, results): + self.results = results + + def add_plot(data, time_resolution, label, use_avg=False): + out_data = {} + counts = {} + for t in data.keys(): + out_data.setdefault(t//time_resolution*time_resolution, 0) + counts.setdefault(t//time_resolution*time_resolution, 0) + out_data[t//time_resolution*time_resolution] += data[t] + counts[t//time_resolution*time_resolution] += 1 + + if use_avg: + out_data = { t: v/counts[t] for t,v in out_data.items() } + + plt.plot(list(out_data.keys()), list(out_data.values()), label=label) + + def add_plot_with_times(data, label): + plt.plot(list(data.keys()), list(data.values()), label=label) + + def display(self, time_resolution=0.1): + plt.figure(figsize=(40,9)) + plt.subplot(3, 3, 1) + for priority in self.results.started.keys(): + Plotter.add_plot(self.results.started[priority], time_resolution, priority) + + plt.xlabel('Time (s)') + plt.ylabel('Released/s') + plt.legend() + + plt.subplot(3, 3, 2) + for priority in self.results.queued.keys(): + Plotter.add_plot(self.results.queued[priority], time_resolution, priority) + + plt.xlabel('Time (s)') + plt.ylabel('Requests/s') + plt.legend() + + plt.subplot(3, 3, 3) + for priority in self.results.unprocessed_queue_sizes.keys(): + data = {k: max(v) for (k,v) in self.results.unprocessed_queue_sizes[priority].items()} + Plotter.add_plot(data, time_resolution, priority) + + plt.xlabel('Time (s)') + plt.ylabel('Max queue size') + plt.legend() + + num = 4 + for priority in self.results.latencies.keys(): + plt.subplot(3, 3, num) + median_latencies = {k: v[int(0.5*len(v))] if len(v) > 0 else 0 for (k,v) in self.results.latencies[priority].items()} + percentile90_latencies = {k: v[int(0.9*len(v))] if len(v) > 0 else 0 for (k,v) in self.results.latencies[priority].items()} + max_latencies = {k: max(v) if len(v) > 0 else 0 for (k,v) in self.results.latencies[priority].items()} + + Plotter.add_plot(median_latencies, 
time_resolution, 'median') + Plotter.add_plot(percentile90_latencies, time_resolution, '90th percentile') + Plotter.add_plot(max_latencies, time_resolution, 'max') + + plt.xlabel('Time (s)') + plt.ylabel(str(priority) + ' Latency (s)') + plt.yscale('log') + plt.legend() + num += 1 + + for priority in self.results.rate.keys(): + plt.subplot(3, 3, num) + if len(self.results.rate[priority]) > 0: + Plotter.add_plot(self.results.rate[priority], time_resolution, 'Rate', use_avg=True) + if len(self.results.released[priority]) > 0: + Plotter.add_plot(self.results.released[priority], time_resolution, 'Released', use_avg=True) + if len(self.results.limit[priority]) > 0: + Plotter.add_plot(self.results.limit[priority], time_resolution, 'Limit', use_avg=True) + if len(self.results.limit_and_budget[priority]) > 0: + Plotter.add_plot(self.results.limit_and_budget[priority], time_resolution, 'Limit and budget', use_avg=True) + if len(self.results.budget[priority]) > 0: + Plotter.add_plot(self.results.budget[priority], time_resolution, 'Budget', use_avg=True) + + plt.xlabel('Time (s)') + plt.ylabel('Value (' + str(priority) + ')') + plt.legend() + num += 1 + + plt.show() + diff --git a/contrib/grv_proxy_model/priority.py b/contrib/grv_proxy_model/priority.py new file mode 100755 index 0000000000..3ba5c05f2e --- /dev/null +++ b/contrib/grv_proxy_model/priority.py @@ -0,0 +1,40 @@ +# +# priority.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import functools + +@functools.total_ordering +class Priority: + def __init__(self, priority_value, label): + self.priority_value = priority_value + self.label = label + + def __lt__(self, other): + return self.priority_value < other.priority_value + + def __str__(self): + return self.label + + def __repr__(self): + return repr(self.label) + +Priority.SYSTEM = Priority(0, "System") +Priority.DEFAULT = Priority(1, "Default") +Priority.BATCH = Priority(2, "Batch") diff --git a/contrib/grv_proxy_model/proxy_model.py b/contrib/grv_proxy_model/proxy_model.py new file mode 100755 index 0000000000..9ca2a39bfe --- /dev/null +++ b/contrib/grv_proxy_model/proxy_model.py @@ -0,0 +1,338 @@ +# +# proxy_model.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import copy +import functools +import heapq + +from priority import Priority +from smoother import Smoother + +@functools.total_ordering +class Task: + def __init__(self, time, fxn): + self.time = time + self.fxn = fxn + + def __lt__(self, other): + return self.time < other.time + +class Limiter: + class UpdateRateParams: + def __init__(self, time): + self.time = time + + class UpdateLimitParams: + def __init__(self, time, elapsed): + self.time = time + self.elapsed = elapsed + + class CanStartParams: + def __init__(self, time, num_started, count): + self.time = time + self.num_started = num_started + self.count = count + + class UpdateBudgetParams: + def __init__(self, time, num_started, num_started_at_priority, min_priority, last_batch, queue_empty, elapsed): + self.time = time + self.num_started = num_started + self.num_started_at_priority = num_started_at_priority + self.min_priority = min_priority + self.last_batch = last_batch + self.queue_empty = queue_empty + self.elapsed = elapsed + + def __init__(self, priority, ratekeeper_model, proxy_model): + self.priority = priority + self.ratekeeper_model = ratekeeper_model + self.proxy_model = proxy_model + self.limit = 0 + self.rate = self.ratekeeper_model.get_limit(0, self.priority) + + def update_rate(self, params): + pass + + def update_limit(self, params): + pass + + def can_start(self, params): + pass + + def update_budget(self, params): + pass + +class OriginalLimiter(Limiter): + def __init__(self, priority, limit_rate_model, proxy_model): + Limiter.__init__(self, priority, limit_rate_model, proxy_model) + + def update_rate(self, params): + self.rate = self.ratekeeper_model.get_limit(params.time, self.priority) + + def update_limit(self, params): + self.limit = min(0, self.limit) + params.elapsed * self.rate + self.limit = min(self.limit, self.rate * 0.01) + self.limit = min(self.limit, 100000) + + self.proxy_model.results.rate[self.priority][params.time] = self.rate + 
self.proxy_model.results.limit[self.priority][params.time] = self.limit + + def can_start(self, params): + return params.num_started < self.limit + + def update_budget(self, params): + self.limit -= params.num_started + +class PositiveBudgetLimiter(OriginalLimiter): + def __init__(self, priority, limit_rate_model, proxy_model): + OriginalLimiter.__init__(self, priority, limit_rate_model, proxy_model) + + def update_limit(self, params): + self.limit += params.elapsed * self.rate + self.limit = min(self.limit, 2.0 * self.rate) + +class ClampedBudgetLimiter(PositiveBudgetLimiter): + def __init__(self, priority, limit_rate_model, proxy_model): + PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model) + + def update_budget(self, params): + min_budget = -self.rate * 5.0 + if self.limit > min_budget: + self.limit = max(self.limit - params.num_started, min_budget) + +class TimeLimiter(PositiveBudgetLimiter): + def __init__(self, priority, limit_rate_model, proxy_model): + PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model) + self.locked_until = 0 + + def can_start(self, params): + return params.time >= self.locked_until and PositiveBudgetLimiter.can_start(self, params) + + def update_budget(self, params): + #print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch)) + + if params.min_priority >= self.priority or params.num_started < self.limit: + self.limit -= params.num_started + else: + self.limit = min(self.limit, max(self.limit - params.num_started, -params.last_batch)) + self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + (params.num_started - self.limit)/self.rate) + + #print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, 
self.locked_until, params.num_started, self.priority, params.min_priority)) + +class TimePositiveBudgetLimiter(PositiveBudgetLimiter): + def __init__(self, priority, limit_rate_model, proxy_model): + PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model) + self.locked_until = 0 + + def update_limit(self, params): + if params.time >= self.locked_until: + PositiveBudgetLimiter.update_limit(self, params) + + def can_start(self, params): + return params.num_started + params.count <= self.limit + + def update_budget(self, params): + #if params.num_started > 0: + #print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch)) + + if params.num_started > self.limit: + self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + penalty/self.rate) + self.limit = 0 + else: + self.limit -= params.num_started + + #if params.num_started > 0: + #print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority)) + +class SmoothingLimiter(OriginalLimiter): + def __init__(self, priority, limit_rate_model, proxy_model): + OriginalLimiter.__init__(self, priority, limit_rate_model, proxy_model) + self.smooth_released = Smoother(2) + self.smooth_rate_limit = Smoother(2) + self.rate_set = False + + def update_rate(self, params): + OriginalLimiter.update_rate(self, params) + if not self.rate_set: + self.rate_set = True + self.smooth_rate_limit.reset(self.rate) + else: + self.smooth_rate_limit.set_total(params.time, self.rate) + + def update_limit(self, params): + self.limit = 2.0 * (self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time)) + + def can_start(self, params): + return 
params.num_started + params.count <= self.limit + + def update_budget(self, params): + self.smooth_released.add_delta(params.time, params.num_started) + +class SmoothingBudgetLimiter(SmoothingLimiter): + def __init__(self, priority, limit_rate_model, proxy_model): + SmoothingLimiter.__init__(self, priority, limit_rate_model, proxy_model) + #self.smooth_filled = Smoother(2) + self.budget = 0 + + def update_limit(self, params): + release_rate = (self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time)) + #self.smooth_filled.set_total(params.time, 1 if release_rate > 0 else 0) + self.limit = 2.0 * release_rate + + self.proxy_model.results.rate[self.priority][params.time] = self.smooth_rate_limit.smooth_total(params.time) + self.proxy_model.results.released[self.priority][params.time] = self.smooth_released.smooth_rate(params.time) + self.proxy_model.results.limit[self.priority][params.time] = self.limit + self.proxy_model.results.limit_and_budget[self.priority][params.time] = self.limit + self.budget + self.proxy_model.results.budget[self.priority][params.time] = self.budget + + #self.budget = max(0, self.budget + params.elapsed * self.smooth_rate_limit.smooth_total(params.time)) + + #if self.smooth_filled.smooth_total(params.time) >= 0.1: + #self.budget += params.elapsed * self.smooth_rate_limit.smooth_total(params.time) + + #print('Update limit: time=%f, priority=%s, limit=%f, rate=%f, released=%f, budget=%f' % (params.time, self.priority, self.limit, self.smooth_rate_limit.smooth_total(params.time), self.smooth_released.smooth_rate(params.time), self.budget)) + + def can_start(self, params): + return params.num_started + params.count <= self.limit + self.budget #or params.num_started + params.count <= self.budget + + def update_budget(self, params): + self.budget = max(0, self.budget + (self.limit - params.num_started_at_priority) / 2 * params.elapsed) + + if params.queue_empty: + self.budget = min(10, self.budget) + + 
self.smooth_released.add_delta(params.time, params.num_started_at_priority) + +class ProxyModel: + class Results: + def __init__(self, priorities, duration): + self.started = self.init_result(priorities, 0, duration) + self.queued = self.init_result(priorities, 0, duration) + self.latencies = self.init_result(priorities, [], duration) + self.unprocessed_queue_sizes = self.init_result(priorities, [], duration) + + self.rate = {p:{} for p in priorities} + self.released = {p:{} for p in priorities} + self.limit = {p:{} for p in priorities} + self.limit_and_budget = {p:{} for p in priorities} + self.budget = {p:{} for p in priorities} + + def init_result(self, priorities, starting_value, duration): + return {p: {s: copy.copy(starting_value) for s in range(0, duration)} for p in priorities} + + def __init__(self, duration, ratekeeper_model, workload_model, Limiter): + self.time = 0 + self.log_time = 0 + self.duration = duration + self.priority_limiters = { priority: Limiter(priority, ratekeeper_model, self) for priority in workload_model.priorities() } + self.workload_model = workload_model + self.request_scheduled = { p: False for p in self.workload_model.priorities()} + + self.tasks = [] + self.request_queue = [] + self.results = ProxyModel.Results(self.workload_model.priorities(), duration) + + def run(self): + self.update_rate() + self.process_requests(self.time) + + for priority in self.workload_model.priorities(): + next_request = self.workload_model.next_request(self.time, priority) + assert next_request is not None + heapq.heappush(self.tasks, Task(next_request.time, lambda next_request=next_request: self.receive_request(next_request))) + self.request_scheduled[priority] = True + + while True:# or len(self.request_queue) > 0: + if int(self.time) > self.log_time: + self.log_time = int(self.time) + #print(self.log_time) + + task = heapq.heappop(self.tasks) + self.time = task.time + if self.time >= self.duration: + break + + task.fxn() + + def update_rate(self): + 
for limiter in self.priority_limiters.values(): + limiter.update_rate(Limiter.UpdateRateParams(self.time)) + + heapq.heappush(self.tasks, Task(self.time + 0.01, lambda: self.update_rate())) + + def receive_request(self, request): + heapq.heappush(self.request_queue, request) + + self.results.queued[request.priority][int(self.time)] += request.count + + next_request = self.workload_model.next_request(self.time, request.priority) + if next_request is not None and next_request.time < self.duration: + heapq.heappush(self.tasks, Task(next_request.time, lambda: self.receive_request(next_request))) + else: + self.request_scheduled[request.priority] = False + + def process_requests(self, last_time): + elapsed = self.time - last_time + for limiter in self.priority_limiters.values(): + limiter.update_limit(Limiter.UpdateLimitParams(self.time, elapsed)) + + current_started = 0 + started = {p:0 for p in self.workload_model.priorities()} + + min_priority = Priority.SYSTEM + last_batch = 0 + while len(self.request_queue) > 0: + request = self.request_queue[0] + + if not self.priority_limiters[request.priority].can_start(Limiter.CanStartParams(self.time, current_started, request.count)): + break + + min_priority = request.priority + last_batch = request.count + + if self.workload_model.request_completed(request) and not self.request_scheduled[request.priority]: + next_request = self.workload_model.next_request(self.time, request.priority) + assert next_request is not None + heapq.heappush(self.tasks, Task(next_request.time, lambda next_request=next_request: self.receive_request(next_request))) + self.request_scheduled[request.priority] = True + + current_started += request.count + started[request.priority] += request.count + + heapq.heappop(self.request_queue) + self.results.started[request.priority][int(self.time)] += request.count + self.results.latencies[request.priority][int(self.time)].append(self.time-request.time) + + if len(self.request_queue) == 0: + min_priority = 
Priority.BATCH + + for priority, limiter in self.priority_limiters.items(): + started_at_priority = sum([v for p,v in started.items() if p <= priority]) + limiter.update_budget(Limiter.UpdateBudgetParams(self.time, current_started, started_at_priority, min_priority, last_batch, len(self.request_queue) == 0 or self.request_queue[0].priority > priority, elapsed)) + + for priority in self.workload_model.priorities(): + self.results.unprocessed_queue_sizes[priority][int(self.time)].append(self.workload_model.workload_models[priority].outstanding) + + current_time = self.time + + delay = 0.001 + heapq.heappush(self.tasks, Task(self.time + delay, lambda: self.process_requests(current_time))) + + diff --git a/contrib/grv_proxy_model/rate_model.py b/contrib/grv_proxy_model/rate_model.py new file mode 100755 index 0000000000..1fabce2c7e --- /dev/null +++ b/contrib/grv_proxy_model/rate_model.py @@ -0,0 +1,83 @@ +# +# rate_model.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import numpy + +class RateModel: + def __init__(self): + pass + + def get_rate(self, time): + pass + +class FixedRateModel(RateModel): + def __init__(self, rate): + RateModel.__init__(self) + self.rate = rate + + def get_rate(self, time): + return self.rate + +class UnlimitedRateModel(FixedRateModel): + def __init__(self): + self.rate = 1e9 + +class IntervalRateModel(RateModel): + def __init__(self, intervals): + self.intervals = sorted(intervals) + + def get_rate(self, time): + if len(self.intervals) == 0 or time < self.intervals[0][0]: + return 0 + + target_interval = len(self.intervals)-1 + for i in range(1, len(self.intervals)): + if time < self.intervals[i][0]: + target_interval = i-1 + break + + self.intervals = self.intervals[target_interval:] + return self.intervals[0][1] + +class SawtoothRateModel(RateModel): + def __init__(self, low, high, frequency): + self.low = low + self.high = high + self.frequency = frequency + + def get_rate(self, time): + if int(2*time/self.frequency) % 2 == 0: + return self.low + else: + return self.high + +class DistributionRateModel(RateModel): + def __init__(self, distribution, frequency): + self.distribution = distribution + self.frequency = frequency + self.last_change = 0 + self.rate = None + + def get_rate(self, time): + if self.frequency == 0 or int((time - self.last_change) / self.frequency) > int(self.last_change / self.frequency) or self.rate is None: + self.last_change = time + self.rate = self.distribution() + + return self.rate diff --git a/contrib/grv_proxy_model/ratekeeper_model.py b/contrib/grv_proxy_model/ratekeeper_model.py new file mode 100755 index 0000000000..57125dc4c0 --- /dev/null +++ b/contrib/grv_proxy_model/ratekeeper_model.py @@ -0,0 +1,67 @@ +# +# ratekeeper.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. 
and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import numpy +import rate_model +from priority import Priority + +class RatekeeperModel: + def __init__(self, limit_models): + self.limit_models = limit_models + + def get_limit(self, time, priority): + return self.limit_models[priority].get_rate(time) + +predefined_ratekeeper = {} + +predefined_ratekeeper['default200_batch100'] = RatekeeperModel( +{ + Priority.SYSTEM: rate_model.UnlimitedRateModel(), + Priority.DEFAULT: rate_model.FixedRateModel(200), + Priority.BATCH: rate_model.FixedRateModel(100) +}) + +predefined_ratekeeper['default_sawtooth'] = RatekeeperModel( +{ + Priority.SYSTEM: rate_model.UnlimitedRateModel(), + Priority.DEFAULT: rate_model.SawtoothRateModel(10, 200, 1), + Priority.BATCH: rate_model.FixedRateModel(0) +}) + +predefined_ratekeeper['default_uniform_random'] = RatekeeperModel( +{ + Priority.SYSTEM: rate_model.UnlimitedRateModel(), + Priority.DEFAULT: rate_model.DistributionRateModel(lambda: numpy.random.uniform(10, 200), 1), + Priority.BATCH: rate_model.FixedRateModel(0) +}) + +predefined_ratekeeper['default_trickle'] = RatekeeperModel( +{ + Priority.SYSTEM: rate_model.UnlimitedRateModel(), + Priority.DEFAULT: rate_model.FixedRateModel(3), + Priority.BATCH: rate_model.FixedRateModel(0) +}) + +predefined_ratekeeper['default1000'] = RatekeeperModel( +{ + Priority.SYSTEM: rate_model.UnlimitedRateModel(), + Priority.DEFAULT: rate_model.FixedRateModel(1000), + 
Priority.BATCH: rate_model.FixedRateModel(500) +}) diff --git a/contrib/grv_proxy_model/smoother.py b/contrib/grv_proxy_model/smoother.py new file mode 100644 index 0000000000..bc1b32ea12 --- /dev/null +++ b/contrib/grv_proxy_model/smoother.py @@ -0,0 +1,53 @@ +# +# smoother.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import math + +class Smoother: + def __init__(self, folding_time): + self.folding_time = folding_time + self.reset(0) + + def reset(self, value): + self.time = 0 + self.total = value + self.estimate = value + + def set_total(self, time, total): + self.add_delta(time, total-self.total) + + def add_delta(self, time, delta): + self.update(time) + self.total += delta + + def smooth_total(self, time): + self.update(time) + return self.estimate + + def smooth_rate(self, time): + self.update(time) + return (self.total-self.estimate) / self.folding_time + + def update(self, time): + elapsed = time - self.time + if elapsed > 0: + self.time = time + self.estimate += (self.total-self.estimate) * (1-math.exp(-elapsed/self.folding_time)) + diff --git a/contrib/grv_proxy_model/workload_model.py b/contrib/grv_proxy_model/workload_model.py new file mode 100755 index 0000000000..63fb4c472e --- /dev/null +++ b/contrib/grv_proxy_model/workload_model.py @@ -0,0 +1,201 @@ +# +# workload_model.py +# +# This source file is 
part of the FoundationDB open source project +# +# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import functools +import numpy +import math + +import rate_model +from priority import Priority + +@functools.total_ordering +class Request: + def __init__(self, time, count, priority): + self.time = time + self.count = count + self.priority = priority + + def __lt__(self, other): + return self.priority < other.priority + +class PriorityWorkloadModel: + def __init__(self, priority, rate_model, batch_model, generator, max_outstanding=1e9): + self.priority = priority + self.rate_model = rate_model + self.batch_model = batch_model + self.generator = generator + self.max_outstanding = max_outstanding + self.outstanding = 0 + + def next_request(self, time): + if self.outstanding >= self.max_outstanding: + return None + + batch_size = self.batch_model.next_batch() + self.outstanding += batch_size + interval = self.generator.next_request_interval(self.rate_model.get_rate(time)) + return Request(time + interval, batch_size, self.priority) + + def request_completed(self, request): + was_full = self.max_outstanding <= self.outstanding + self.outstanding -= request.count + + return was_full and self.outstanding < self.max_outstanding + +class WorkloadModel: + def __init__(self, workload_models): + self.workload_models = workload_models + + def priorities(self): + return list(self.workload_models.keys()) + + 
def next_request(self, time, priority): + return self.workload_models[priority].next_request(time) + + def request_completed(self, request): + return self.workload_models[request.priority].request_completed(request) + +class Distribution: + EXPONENTIAL = lambda x: numpy.random.exponential(x) + UNIFORM = lambda x: numpy.random.uniform(0, 2.0*x) + FIXED = lambda x: x + +class BatchGenerator: + def __init__(self): + pass + + def next_batch(self): + pass + +class DistributionBatchGenerator(BatchGenerator): + def __init__(self, distribution, size): + BatchGenerator.__init__(self) + self.distribution = distribution + self.size = size + + def next_batch(self): + return math.ceil(self.distribution(self.size)) + +class RequestGenerator: + def __init__(self): + pass + + def next_request_interval(self, rate): + pass + +class DistributionRequestGenerator(RequestGenerator): + def __init__(self, distribution): + RequestGenerator.__init__(self) + self.distribution = distribution + + def next_request_interval(self, rate): + if rate == 0: + return 1e9 + + return self.distribution(1.0/rate) + +predefined_workloads = {} + +predefined_workloads['slow_exponential'] = WorkloadModel( +{ + Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT, + rate_model.FixedRateModel(100), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.EXPONENTIAL), + max_outstanding=100 + ) +}) + +predefined_workloads['fixed_uniform'] = WorkloadModel( +{ + Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM, + rate_model.FixedRateModel(0), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=10 + ), + Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT, + rate_model.FixedRateModel(95), + DistributionBatchGenerator(Distribution.FIXED, 10), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=200 + ), + Priority.BATCH: PriorityWorkloadModel(Priority.BATCH, + 
rate_model.FixedRateModel(1), + DistributionBatchGenerator(Distribution.UNIFORM, 500), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=200 + ) +}) + +predefined_workloads['batch_starvation'] = WorkloadModel( +{ + Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM, + rate_model.FixedRateModel(1), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=10 + ), + Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT, + rate_model.IntervalRateModel([(0,50), (60,150), (120,90)]), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=200 + ), + Priority.BATCH: PriorityWorkloadModel(Priority.BATCH, + rate_model.FixedRateModel(100), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=200 + ) +}) + +predefined_workloads['default_low_high_low'] = WorkloadModel( +{ + Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM, + rate_model.FixedRateModel(0), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=10 + ), + Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT, + rate_model.IntervalRateModel([(0,100), (60,300), (120,100)]), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=200 + ), + Priority.BATCH: PriorityWorkloadModel(Priority.BATCH, + rate_model.FixedRateModel(0), + DistributionBatchGenerator(Distribution.FIXED, 1), + DistributionRequestGenerator(Distribution.UNIFORM), + max_outstanding=200 + ) +}) + +for rate in [83, 100, 180, 190, 200]: + predefined_workloads['default%d' % rate] = WorkloadModel( + { + Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT, + rate_model.FixedRateModel(rate), + DistributionBatchGenerator(Distribution.FIXED, 1), + 
DistributionRequestGenerator(Distribution.EXPONENTIAL), + max_outstanding=1000 + ) + }) diff --git a/design/special-key-space.md b/design/special-key-space.md index 5d22e9d7f3..7cdcfe460d 100644 --- a/design/special-key-space.md +++ b/design/special-key-space.md @@ -20,7 +20,7 @@ Consequently, the special-key-space framework wants to integrate all client func If your feature is exposing information to clients and the results are easily formatted as key-value pairs, then you can use special-key-space to implement your client function. ## How -If you choose to use, you need to implement a function class that inherits from `SpecialKeyRangeReadImpl`, which has an abstract method `Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr)`. +If you choose to use, you need to implement a function class that inherits from `SpecialKeyRangeReadImpl`, which has an abstract method `Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr)`. This method can be treated as a callback, whose implementation details are determined by the developer. Once you fill out the method, register the function class to the corresponding key range. Below is a detailed example. 
@@ -38,10 +38,10 @@ public: CountryToCapitalCity[LiteralStringRef("China")] = LiteralStringRef("Beijing"); } // Implement the getRange interface - Future> getRange(ReadYourWritesTransaction* ryw, + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override { - Standalone result; + RangeResult result; for (auto const& country : CountryToCapitalCity) { // the registered range here: [\xff\xff/example/, \xff\xff/example/\xff] Key keyWithPrefix = country.first.withPrefix(range.begin); @@ -71,7 +71,7 @@ ASSERT(res1.present() && res.getValue() == LiteralStringRef("Tokyo")); // getRange // Note: for getRange(key1, key2), both key1 and key2 should prefixed with \xff\xff // something like getRange("normal_key", "\xff\xff/...") is not supported yet -Standalone res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff"))); +RangeResult res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff"))); // res2 should contain USA and UK ASSERT( res2.size() == 2 && diff --git a/documentation/sphinx/conf.py b/documentation/sphinx/conf.py index 5ec9238930..ab42fdba6a 100644 --- a/documentation/sphinx/conf.py +++ b/documentation/sphinx/conf.py @@ -49,7 +49,7 @@ master_doc = 'index' # General information about the project. project = u'FoundationDB' -copyright = u'2013-2018 Apple, Inc and the FoundationDB project authors' +copyright = u'2013-2021 Apple, Inc and the FoundationDB project authors' # Load the version information from 'versions.target' import xml.etree.ElementTree as ET diff --git a/documentation/sphinx/source/administration.rst b/documentation/sphinx/source/administration.rst index 5f6369d889..7053a78ca0 100644 --- a/documentation/sphinx/source/administration.rst +++ b/documentation/sphinx/source/administration.rst @@ -799,3 +799,18 @@ Upgrading from Older Versions ----------------------------- Upgrades from versions older than 5.0.0 are no longer supported. 
+ +Version-specific notes on downgrading +===================================== + +In general, downgrades between non-patch releases (i.e. 6.2.x - 6.1.x) are not supported. + +.. _downgrade-specific-version: + +Downgrading from 6.3.13 - 6.2.33 +-------------------------------- +After upgrading from 6.2 to 6.3, the option of rolling back and downgrading to 6.2 is still possible, given that the following conditions are met: + +* The 6.3 cluster cannot have ``TLogVersion`` greater than V4 (6.2). +* The 6.3 cluster cannot use storage engine types that are not ``ssd-1``, ``ssd-2``, or ``memory``. +* The 6.3 cluster must not have any key servers serialized with tag encoding. This condition can only be guaranteed if the ``TAG_ENCODE_KEY_SERVERS`` knob has never been changed to ``true`` on this cluster. diff --git a/documentation/sphinx/source/api-c.rst b/documentation/sphinx/source/api-c.rst index 0d02dc18dd..0acafea8ba 100644 --- a/documentation/sphinx/source/api-c.rst +++ b/documentation/sphinx/source/api-c.rst @@ -133,7 +133,7 @@ API versioning Prior to including ``fdb_c.h``, you must define the ``FDB_API_VERSION`` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. :: - #define FDB_API_VERSION 700 + #define FDB_API_VERSION 710 #include .. function:: fdb_error_t fdb_select_api_version(int version) diff --git a/documentation/sphinx/source/api-common.rst.inc b/documentation/sphinx/source/api-common.rst.inc index 0be8cc30fd..f70e16a5d6 100644 --- a/documentation/sphinx/source/api-common.rst.inc +++ b/documentation/sphinx/source/api-common.rst.inc @@ -148,7 +148,7 @@ .. |atomic-versionstamps-tuple-warning-value| replace:: At this time, versionstamped values are not compatible with the Tuple layer except in Java, Python, and Go. 
Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages. -.. |api-version| replace:: 700 +.. |api-version| replace:: 710 .. |streaming-mode-blurb1| replace:: When using |get-range-func| and similar interfaces, API clients can request large ranges of the database to iterate over. Making such a request doesn't necessarily mean that the client will consume all of the data in the range - sometimes the client doesn't know how far it intends to iterate in advance. FoundationDB tries to balance latency and bandwidth by requesting data for iteration in batches. diff --git a/documentation/sphinx/source/api-python.rst b/documentation/sphinx/source/api-python.rst index 59b82406e0..0cd1e8f078 100644 --- a/documentation/sphinx/source/api-python.rst +++ b/documentation/sphinx/source/api-python.rst @@ -108,7 +108,7 @@ Opening a database After importing the ``fdb`` module and selecting an API version, you probably want to open a :class:`Database` using :func:`open`:: import fdb - fdb.api_version(700) + fdb.api_version(710) db = fdb.open() .. function:: open( cluster_file=None, event_model=None ) diff --git a/documentation/sphinx/source/api-ruby.rst b/documentation/sphinx/source/api-ruby.rst index 7c707f445b..ddb721a0d0 100644 --- a/documentation/sphinx/source/api-ruby.rst +++ b/documentation/sphinx/source/api-ruby.rst @@ -93,7 +93,7 @@ Opening a database After requiring the ``FDB`` gem and selecting an API version, you probably want to open a :class:`Database` using :func:`open`:: require 'fdb' - FDB.api_version 700 + FDB.api_version 710 db = FDB.open .. 
function:: open( cluster_file=nil ) -> Database diff --git a/documentation/sphinx/source/api-version-upgrade-guide.rst b/documentation/sphinx/source/api-version-upgrade-guide.rst index 83486986a6..707d8e3246 100644 --- a/documentation/sphinx/source/api-version-upgrade-guide.rst +++ b/documentation/sphinx/source/api-version-upgrade-guide.rst @@ -9,6 +9,14 @@ This document provides an overview of changes that an application developer may For more details about API versions, see :ref:`api-versions`. +.. _api-version-upgrade-guide-710: + +API version 710 +=============== + +General +------- + .. _api-version-upgrade-guide-700: API version 700 diff --git a/documentation/sphinx/source/class-scheduling-go.rst b/documentation/sphinx/source/class-scheduling-go.rst index 77d9c01e90..4f505d4931 100644 --- a/documentation/sphinx/source/class-scheduling-go.rst +++ b/documentation/sphinx/source/class-scheduling-go.rst @@ -29,7 +29,7 @@ Before using the API, we need to specify the API version. This allows programs t .. code-block:: go - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file `. @@ -78,7 +78,7 @@ If this is all working, it looks like we are ready to start building a real appl func main() { // Different API versions may expose different runtime behaviors. 
- fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) // Open the default database from the system cluster db := fdb.MustOpenDefault() @@ -666,7 +666,7 @@ Here's the code for the scheduling tutorial: } func main() { - fdb.MustAPIVersion(700) + fdb.MustAPIVersion(710) db := fdb.MustOpenDefault() db.Options().SetTransactionTimeout(60000) // 60,000 ms = 1 minute db.Options().SetTransactionRetryLimit(100) diff --git a/documentation/sphinx/source/class-scheduling-java.rst b/documentation/sphinx/source/class-scheduling-java.rst index c5dda17d55..dec3b23e18 100644 --- a/documentation/sphinx/source/class-scheduling-java.rst +++ b/documentation/sphinx/source/class-scheduling-java.rst @@ -30,7 +30,7 @@ Before using the API, we need to specify the API version. This allows programs t private static final Database db; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); } @@ -66,7 +66,7 @@ If this is all working, it looks like we are ready to start building a real appl private static final Database db; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); } @@ -441,7 +441,7 @@ Here's the code for the scheduling tutorial: private static final Database db; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); db.options().setTransactionTimeout(60000); // 60,000 ms = 1 minute db.options().setTransactionRetryLimit(100); diff --git a/documentation/sphinx/source/class-scheduling-ruby.rst b/documentation/sphinx/source/class-scheduling-ruby.rst index c8d8483aad..f5871578e3 100644 --- a/documentation/sphinx/source/class-scheduling-ruby.rst +++ b/documentation/sphinx/source/class-scheduling-ruby.rst @@ -23,7 +23,7 @@ Open a Ruby interactive interpreter and import the FoundationDB API module:: Before using the API, we need to specify the API version. 
This allows programs to maintain compatibility even if the API is modified in future versions:: - > FDB.api_version 700 + > FDB.api_version 710 => nil Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file `. :: @@ -46,7 +46,7 @@ If this is all working, it looks like we are ready to start building a real appl .. code-block:: ruby require 'fdb' - FDB.api_version 700 + FDB.api_version 710 @db = FDB.open @db['hello'] = 'world' print 'hello ', @db['hello'] @@ -373,7 +373,7 @@ Here's the code for the scheduling tutorial: require 'fdb' - FDB.api_version 700 + FDB.api_version 710 #################################### ## Initialization ## diff --git a/documentation/sphinx/source/class-scheduling.rst b/documentation/sphinx/source/class-scheduling.rst index 23615a08a6..bdf3c72680 100644 --- a/documentation/sphinx/source/class-scheduling.rst +++ b/documentation/sphinx/source/class-scheduling.rst @@ -30,7 +30,7 @@ Open a Python interactive interpreter and import the FoundationDB API module:: Before using the API, we need to specify the API version. This allows programs to maintain compatibility even if the API is modified in future versions:: - >>> fdb.api_version(700) + >>> fdb.api_version(710) Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file `. :: @@ -48,7 +48,7 @@ When this command returns without exception, the modification is durably stored If this is all working, it looks like we are ready to start building a real application. 
For reference, here's the full code for "hello world":: import fdb - fdb.api_version(700) + fdb.api_version(710) db = fdb.open() db[b'hello'] = b'world' print 'hello', db[b'hello'] @@ -91,7 +91,7 @@ FoundationDB includes a few tools that make it easy to model data using this app opening a :ref:`directory ` in the database:: import fdb - fdb.api_version(700) + fdb.api_version(710) db = fdb.open() scheduling = fdb.directory.create_or_open(db, ('scheduling',)) @@ -337,7 +337,7 @@ Here's the code for the scheduling tutorial:: import fdb import fdb.tuple - fdb.api_version(700) + fdb.api_version(710) #################################### diff --git a/documentation/sphinx/source/developer-guide.rst b/documentation/sphinx/source/developer-guide.rst index d26f235304..f51db018bb 100644 --- a/documentation/sphinx/source/developer-guide.rst +++ b/documentation/sphinx/source/developer-guide.rst @@ -955,11 +955,27 @@ that process, and wait for necessary data to be moved away. Maintenance mode will be unable to use until the key is cleared, which is the same as the fdbcli command ``datadistribution enable ssfailure``. While the key is set, any commit that tries to set a key in the range will fail with the ``special_keys_api_failure`` error. #. ``\xff\xff/management/data_distribution/`` Read/write. Changing these two keys will change the two corresponding system keys ``\xff/dataDistributionMode`` and ``\xff\x02/rebalanceDDIgnored``. The value of ``\xff\xff/management/data_distribution/mode`` is a literal text of ``0`` (disable) or ``1`` (enable). Transactions committed with invalid values will throw ``special_keys_api_failure`` . The value of ``\xff\xff/management/data_distribution/rebalance_ignored`` is empty. If present, it means data distribution is disabled for rebalance. Any transaction committed with non-empty value for this key will throw ``special_keys_api_failure``. For more details, see help text of ``fdbcli`` command ``datadistribution``. +#. 
``\xff\xff/management/consistency_check_suspended`` Read/write. Set or read this key will set or read the underlying system key ``\xff\x02/ConsistencyCheck/Suspend``. The value of this special key is unused thus if present, will be empty. In particular, if the key exists, then consistency is suspended. For more details, see help text of ``fdbcli`` command ``consistencycheck``. +#. ``\xff\xff/management/db_locked`` Read/write. A single key that can be read and modified. Set the key will lock the database and clear the key will unlock. If the database is already locked, then the commit will fail with the ``special_keys_api_failure`` error. For more details, see help text of ``fdbcli`` command ``lock`` and ``unlock``. +#. ``\xff\xff/management/auto_coordinators`` Read-only. A single key, if read, will return a set of processes which is able to satisfy the current redundancy level and serve as new coordinators. The return value is formatted as a comma delimited string of network addresses of coordinators, i.e. ``,,...,``. An exclusion is syntactically either an ip address (e.g. ``127.0.0.1``), or an ip address and port (e.g. ``127.0.0.1:4500``). If no port is specified, then all processes on that host match the exclusion. +Configuration module +~~~~~~~~~~~~~~~~~~~~ + +The configuration module is for changing the cluster configuration. +For example, you can change a process type or update coordinators by manipulating related special keys through transactions. + +#. ``\xff\xff/configuration/process/class_type/
:= `` Read/write. Reading keys in the range will retrieve processes' class types. Setting keys in the range will update processes' class types. The process matching ``
`` will be assigned to the given class type if the commit is successful. The valid class types are ``storage``, ``transaction``, ``resolution``, etc. A full list of class type can be found via ``fdbcli`` command ``help setclass``. Clearing keys is forbidden in the range. Instead, you can set the type as ``default``, which will clear the assigned class type if existing. For more details, see help text of ``fdbcli`` command ``setclass``. +#. ``\xff\xff/configuration/process/class_source/
`` := `` Read-only. Reading keys in the range will retrieve processes' class source. The class source is one of ``command_line``, ``configure_auto``, ``set_class`` and ``invalid``, indicating the source that the process's class type comes from. +#. ``\xff\xff/configuration/coordinators/processes := ,,...,`` Read/write. A single key, if read, will return a comma delimited string of coordinators' network addresses. Thus to provide a new set of coordinators, set the key with a correctly formatted string of new coordinators' network addresses. As there's always the need to have coordinators, clear on the key is forbidden and a transaction will fail with the ``special_keys_api_failure`` error if the clear is committed. For more details, see help text of ``fdbcli`` command ``coordinators``. +#. ``\xff\xff/configuration/coordinators/cluster_description := `` Read/write. A single key, if read, will return the cluster description. Thus modifying the key will update the cluster description. The new description needs to match ``[A-Za-z0-9_]+``, otherwise, the ``special_keys_api_failure`` error will be thrown. In addition, clear on the key is meaningless thus forbidden. For more details, see help text of ``fdbcli`` command ``coordinators``. + +The ``
`` here is the network address of the corresponding process. Thus the general form is ``ip:port``. + Error message module ~~~~~~~~~~~~~~~~~~~~ diff --git a/documentation/sphinx/source/hierarchical-documents-java.rst b/documentation/sphinx/source/hierarchical-documents-java.rst index db33abd4ef..b9869afd01 100644 --- a/documentation/sphinx/source/hierarchical-documents-java.rst +++ b/documentation/sphinx/source/hierarchical-documents-java.rst @@ -69,7 +69,7 @@ Here’s a basic implementation of the recipe. private static final long EMPTY_ARRAY = -1; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); docSpace = new Subspace(Tuple.from("D")); } diff --git a/documentation/sphinx/source/mr-status-json-schemas.rst.inc b/documentation/sphinx/source/mr-status-json-schemas.rst.inc index 8cbd5d9a9f..7979331898 100644 --- a/documentation/sphinx/source/mr-status-json-schemas.rst.inc +++ b/documentation/sphinx/source/mr-status-json-schemas.rst.inc @@ -121,6 +121,16 @@ "counter":0, "roughness":0.0 }, + "fetched_versions":{ + "hz":0.0, + "counter":0, + "roughness":0.0 + }, + "fetches_from_logs":{ + "hz":0.0, + "counter":0, + "roughness":0.0 + }, "grv_latency_statistics":{ // GRV Latency metrics are grouped according to priority (currently batch or default). "default":{ "count":0, @@ -171,6 +181,18 @@ "p99":0.0, "p99.9":0.0 }, + "commit_batching_window_size":{ + "count":0, + "min":0.0, + "max":0.0, + "median":0.0, + "mean":0.0, + "p25":0.0, + "p90":0.0, + "p95":0.0, + "p99":0.0, + "p99.9":0.0 + }, "grv_latency_bands":{ // How many GRV requests belong to the latency (in seconds) band (e.g., How many requests belong to [0.01,0.1] latency band). The key is the upper bound of the band and the lower bound is the next smallest band (or 0, if none). Example: {0.01: 27, 0.1: 18, 1: 1, inf: 98,filtered: 10}, we have 18 requests in [0.01, 0.1) band. 
"$map_key=upperBoundOfBand": 1 }, @@ -592,6 +614,10 @@ "data_distribution_disabled_for_rebalance":true, "data_distribution_disabled":true, "active_primary_dc":"pv", + "bounce_impact":{ + "can_clean_bounce":true, + "reason":"" + }, "configuration":{ "log_anti_quorum":0, "log_replicas":2, diff --git a/documentation/sphinx/source/multimaps-java.rst b/documentation/sphinx/source/multimaps-java.rst index 3c9a46ad3c..d321a8a0aa 100644 --- a/documentation/sphinx/source/multimaps-java.rst +++ b/documentation/sphinx/source/multimaps-java.rst @@ -74,7 +74,7 @@ Here’s a simple implementation of multimaps with multisets as described: private static final int N = 100; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); multi = new Subspace(Tuple.from("M")); } diff --git a/documentation/sphinx/source/priority-queues-java.rst b/documentation/sphinx/source/priority-queues-java.rst index 0fafb08b4b..37476a3663 100644 --- a/documentation/sphinx/source/priority-queues-java.rst +++ b/documentation/sphinx/source/priority-queues-java.rst @@ -74,7 +74,7 @@ Here's a basic implementation of the model: private static final Random randno; static{ - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); pq = new Subspace(Tuple.from("P")); diff --git a/documentation/sphinx/source/queues-java.rst b/documentation/sphinx/source/queues-java.rst index b4b60df48b..033f0df88a 100644 --- a/documentation/sphinx/source/queues-java.rst +++ b/documentation/sphinx/source/queues-java.rst @@ -73,7 +73,7 @@ The following is a simple implementation of the basic pattern: private static final Random randno; static{ - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); queue = new Subspace(Tuple.from("Q")); randno = new Random(); diff --git a/documentation/sphinx/source/release-notes/release-notes-620.rst b/documentation/sphinx/source/release-notes/release-notes-620.rst index 3148eefa97..3e388a8129 100644 
--- a/documentation/sphinx/source/release-notes/release-notes-620.rst +++ b/documentation/sphinx/source/release-notes/release-notes-620.rst @@ -8,6 +8,7 @@ Release Notes * Fix backup agent stall when writing to local filesystem with slow metadata operations. `(PR #4428) `_ * Backup agent no longer uses 4k block caching layer on local output files so that write operations are larger. `(PR #4428) `_ * Fix accounting error that could cause commits to incorrectly fail with ``proxy_memory_limit_exceeded``. `(PR #4529) `_ +* Added support for downgrades from FDB version 6.3. For more details, see the :ref:`administration notes `. `(PR #4673) `_ `(PR #4469) `_ 6.2.32 ====== diff --git a/documentation/sphinx/source/release-notes/release-notes-630.rst b/documentation/sphinx/source/release-notes/release-notes-630.rst index f4b5c8aacb..2057e7fcb2 100644 --- a/documentation/sphinx/source/release-notes/release-notes-630.rst +++ b/documentation/sphinx/source/release-notes/release-notes-630.rst @@ -2,9 +2,11 @@ Release Notes ############# + 6.3.13 ====== * The multi-version client now requires at most two client connections with version 6.2 or larger, regardless of how many external clients are configured. Clients older than 6.2 will continue to create an additional connection each. `(PR #4667) `_ +* Fix an accounting error that could potentially result in inaccuracies in priority busyness metrics. `(PR #4824) `_ 6.3.12 ====== diff --git a/documentation/sphinx/source/release-notes/release-notes-700.rst b/documentation/sphinx/source/release-notes/release-notes-700.rst index 84e8f0680a..ea78b9a10b 100644 --- a/documentation/sphinx/source/release-notes/release-notes-700.rst +++ b/documentation/sphinx/source/release-notes/release-notes-700.rst @@ -30,8 +30,10 @@ Fixes Status ------ - - +* Added ``commit_batching_window_size`` to the proxy roles section of status to record statistics about commit batching window size on each proxy. 
`(PR #4735) `_ +* Added ``cluster.bounce_impact`` section to status to report if there will be any extra effects when bouncing the cluster, and if so, the reason for those effects. `(PR #4770) `_ +* Added ``fetched_versions`` to the storage metrics section of status to report how fast a storage server is catching up in versions. `(PR #4770) `_ +* Added ``fetches_from_logs`` to the storage metrics section of status to report how frequently a storage server fetches updates from transaction logs. `(PR #4770) `_ Bindings -------- diff --git a/documentation/sphinx/source/simple-indexes-java.rst b/documentation/sphinx/source/simple-indexes-java.rst index c5edf02e71..61769ea847 100644 --- a/documentation/sphinx/source/simple-indexes-java.rst +++ b/documentation/sphinx/source/simple-indexes-java.rst @@ -87,7 +87,7 @@ In this example, we’re storing user data based on user ID but sometimes need t private static final Subspace index; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); main = new Subspace(Tuple.from("user")); index = new Subspace(Tuple.from("zipcode_index")); diff --git a/documentation/sphinx/source/tables-java.rst b/documentation/sphinx/source/tables-java.rst index 235dbd5b47..14cd0348ca 100644 --- a/documentation/sphinx/source/tables-java.rst +++ b/documentation/sphinx/source/tables-java.rst @@ -62,7 +62,7 @@ Here’s a simple implementation of the basic table pattern: private static final Subspace colIndex; static { - fdb = FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); table = new Subspace(Tuple.from("T")); rowIndex = table.subspace(Tuple.from("R")); diff --git a/documentation/sphinx/source/vector-java.rst b/documentation/sphinx/source/vector-java.rst index 17da6ebed8..4341948316 100644 --- a/documentation/sphinx/source/vector-java.rst +++ b/documentation/sphinx/source/vector-java.rst @@ -77,7 +77,7 @@ Here’s the basic pattern: private static final Subspace vector; static { - fdb = 
FDB.selectAPIVersion(700); + fdb = FDB.selectAPIVersion(710); db = fdb.open(); vector = new Subspace(Tuple.from("V")); } diff --git a/documentation/tutorial/tutorial.actor.cpp b/documentation/tutorial/tutorial.actor.cpp index dfc684e922..29b462c366 100644 --- a/documentation/tutorial/tutorial.actor.cpp +++ b/documentation/tutorial/tutorial.actor.cpp @@ -366,7 +366,7 @@ ACTOR Future fdbClient() { // 3. write 10 values in [k, k+100] beginIdx = deterministicRandom()->randomInt(0, 1e8 - 100); startKey = keyPrefix + std::to_string(beginIdx); - Standalone range = wait(tx.getRange(KeyRangeRef(startKey, endKey), 100)); + RangeResult range = wait(tx.getRange(KeyRangeRef(startKey, endKey), 100)); for (int i = 0; i < 10; ++i) { Key k = Key(keyPrefix + std::to_string(beginIdx + deterministicRandom()->randomInt(0, 100))); tx.set(k, LiteralStringRef("foo")); diff --git a/fdbbackup/CMakeLists.txt b/fdbbackup/CMakeLists.txt index 48b1ad1aef..da2457b850 100644 --- a/fdbbackup/CMakeLists.txt +++ b/fdbbackup/CMakeLists.txt @@ -23,14 +23,14 @@ target_link_libraries(fdbdecode PRIVATE fdbclient) if(NOT OPEN_FOR_IDE) if(GENERATE_DEBUG_PACKAGES) fdb_install(TARGETS fdbbackup DESTINATION bin COMPONENT clients) - fdb_install(PROGRAMS $ DESTINATION fdbmonitor COMPONENT clients RENAME backup_agent/backup_agent) + fdb_install(PROGRAMS $ DESTINATION backupagent COMPONENT clients RENAME backup_agent/backup_agent) fdb_install(PROGRAMS $ DESTINATION bin COMPONENT clients RENAME fdbrestore) fdb_install(PROGRAMS $ DESTINATION bin COMPONENT clients RENAME dr_agent) fdb_install(PROGRAMS $ DESTINATION bin COMPONENT clients RENAME fdbdr) else() add_custom_target(prepare_fdbbackup_install ALL DEPENDS strip_only_fdbbackup) fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION bin COMPONENT clients) - fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION fdbmonitor COMPONENT clients RENAME backup_agent/backup_agent) + fdb_install(PROGRAMS 
${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION backupagent COMPONENT clients RENAME backup_agent/backup_agent) fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION bin COMPONENT clients RENAME fdbrestore) fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION bin COMPONENT clients RENAME dr_agent) fdb_install(PROGRAMS ${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION bin COMPONENT clients RENAME fdbdr) diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index f976de06a6..77e4b03f0d 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -1576,7 +1576,7 @@ ACTOR Future getLayerStatus(Reference tr state Reference tr2(new ReadYourWritesTransaction(dest)); tr2->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr2->setOption(FDBTransactionOptions::LOCK_AWARE); - state Standalone tagNames = wait(tr2->getRange(dba.tagNames.range(), 10000, snapshot)); + state RangeResult tagNames = wait(tr2->getRange(dba.tagNames.range(), 10000, snapshot)); state std::vector>> backupVersion; state std::vector> backupStatus; state std::vector> tagRangeBytesDR; @@ -1638,7 +1638,7 @@ ACTOR Future cleanupStatus(Reference tr, std::string name, std::string id, int limit = 1) { - state Standalone docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true)); + state RangeResult docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true)); state bool readMore = false; state int i; for (i = 0; i < docs.size(); ++i) { @@ -1667,7 +1667,7 @@ ACTOR Future cleanupStatus(Reference tr, } if (readMore) { limit = 10000; - Standalone docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true)); + RangeResult docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true)); docs = std::move(docs2); readMore = false; } @@ -1684,7 +1684,7 @@ ACTOR Future getLayerStatus(Database src, std::string root try { 
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr.setOption(FDBTransactionOptions::LOCK_AWARE); - state Standalone kvPairs = + state RangeResult kvPairs = wait(tr.getRange(KeyRangeRef(rootKey, strinc(rootKey)), GetRangeLimits::ROW_LIMIT_UNLIMITED)); json_spirit::mObject statusDoc; JSONDoc modifier(statusDoc); @@ -4246,4 +4246,4 @@ int main(int argc, char* argv[]) { } flushAndExit(status); -} \ No newline at end of file +} diff --git a/fdbcli/CMakeLists.txt b/fdbcli/CMakeLists.txt index 2b65baf040..d0cab5b178 100644 --- a/fdbcli/CMakeLists.txt +++ b/fdbcli/CMakeLists.txt @@ -1,7 +1,10 @@ set(FDBCLI_SRCS fdbcli.actor.cpp + fdbcli.actor.h + ConsistencyCheckCommand.actor.cpp FlowLineNoise.actor.cpp FlowLineNoise.h + Util.cpp linenoise/linenoise.h) if(NOT WIN32) diff --git a/fdbcli/ConsistencyCheckCommand.actor.cpp b/fdbcli/ConsistencyCheckCommand.actor.cpp new file mode 100644 index 0000000000..892acbb239 --- /dev/null +++ b/fdbcli/ConsistencyCheckCommand.actor.cpp @@ -0,0 +1,63 @@ +/* + * ConsistencyCheckCommand.actor.cpp + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "fdbcli/fdbcli.actor.h" + +#include "fdbclient/FDBOptions.g.h" +#include "fdbclient/IClientApi.h" + +#include "flow/Arena.h" +#include "flow/FastRef.h" +#include "flow/ThreadHelper.actor.h" +#include "flow/actorcompiler.h" // This must be the last #include. + +namespace fdb_cli { + +const KeyRef consistencyCheckSpecialKey = LiteralStringRef("\xff\xff/management/consistency_check_suspended"); + +ACTOR Future consistencyCheckCommandActor(Reference tr, std::vector tokens) { + // Here we do not proceed in a try-catch loop since the transaction is always supposed to succeed. + // If not, the outer loop catch block(fdbcli.actor.cpp) will handle the error and print out the error message + tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES); + if (tokens.size() == 1) { + Optional suspended = wait(safeThreadFutureToFuture(tr->get(consistencyCheckSpecialKey))); + printf("ConsistencyCheck is %s\n", suspended.present() ? "off" : "on"); + } else if (tokens.size() == 2 && tokencmp(tokens[1], "off")) { + tr->set(consistencyCheckSpecialKey, Value()); + wait(safeThreadFutureToFuture(tr->commit())); + } else if (tokens.size() == 2 && tokencmp(tokens[1], "on")) { + tr->clear(consistencyCheckSpecialKey); + wait(safeThreadFutureToFuture(tr->commit())); + } else { + printUsage(tokens[0]); + return false; + } + return true; +} + +CommandFactory consistencyCheckFactory( + "consistencycheck", + CommandHelp( + "consistencycheck [on|off]", + "permits or prevents consistency checking", + "Calling this command with `on' permits consistency check processes to run and `off' will halt their checking. 
" + "Calling this command with no arguments will display if consistency checking is currently allowed.\n")); + +} // namespace fdb_cli diff --git a/flow/ThreadHelper.cpp b/fdbcli/Util.cpp similarity index 50% rename from flow/ThreadHelper.cpp rename to fdbcli/Util.cpp index fe61752ea5..f67f27c774 100644 --- a/flow/ThreadHelper.cpp +++ b/fdbcli/Util.cpp @@ -1,9 +1,9 @@ /* - * ThreadHelper.cpp + * Util.cpp * * This source file is part of the FoundationDB open source project * - * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,8 +18,26 @@ * limitations under the License. */ -#include "flow/ThreadHelper.actor.h" +#include "fdbcli/fdbcli.actor.h" -ThreadCallback* ThreadCallback::addCallback(ThreadCallback* cb) { - return (new ThreadMultiCallback())->addCallback(this)->addCallback(cb); +#include "flow/Arena.h" + +namespace fdb_cli { + +bool tokencmp(StringRef token, const char* command) { + if (token.size() != strlen(command)) + return false; + + return !memcmp(token.begin(), command, token.size()); } + +void printUsage(StringRef command) { + const auto& helpMap = CommandFactory::commands(); + auto i = helpMap.find(command.toString()); + if (i != helpMap.end()) + printf("Usage: %s\n", i->second.usage.c_str()); + else + fprintf(stderr, "ERROR: Unknown command `%s'\n", command.toString().c_str()); +} + +} // namespace fdb_cli diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index 9eb5f974a4..8291ee6cc3 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -21,6 +21,8 @@ #include "boost/lexical_cast.hpp" #include "fdbclient/NativeAPI.actor.h" #include "fdbclient/FDBTypes.h" +#include "fdbclient/IClientApi.h" +#include "fdbclient/MultiVersionTransaction.h" #include "fdbclient/Status.h" #include 
"fdbclient/StatusClient.h" #include "fdbclient/DatabaseContext.h" @@ -34,6 +36,7 @@ #include "fdbclient/FDBOptions.g.h" #include "fdbclient/TagThrottle.h" +#include "fdbclient/ThreadSafeTransaction.h" #include "flow/DeterministicRandom.h" #include "flow/Platform.h" @@ -41,6 +44,7 @@ #include "flow/SimpleOpt.h" #include "fdbcli/FlowLineNoise.h" +#include "fdbcli/fdbcli.actor.h" #include #include @@ -56,6 +60,13 @@ #include "flow/actorcompiler.h" // This must be the last #include. +#define FDB_API_VERSION 710 +/* + * While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi + * instance (e.g. from ThreadSafeApi) + */ +#define API ((IClientApi*)MultiVersionApi::api) + extern const char* getSourceVersion(); std::vector validOptions; @@ -173,6 +184,13 @@ public: } } + // TODO: replace the above function after we refactor all fdbcli code + void apply(Reference tr) { + for (const auto& [name, value] : transactionOptions.options) { + tr->setOption(name, value.castTo()); + } + } + // Returns true if any options have been set bool hasAnyOptionsEnabled() const { return !transactionOptions.options.empty(); } @@ -320,13 +338,6 @@ static std::string formatStringRef(StringRef item, bool fullEscaping = false) { return ret; } -static bool tokencmp(StringRef token, const char* command) { - if (token.size() != strlen(command)) - return false; - - return !memcmp(token.begin(), command, token.size()); -} - static std::vector> parseLine(std::string& line, bool& err, bool& partial) { err = false; partial = false; @@ -453,20 +464,13 @@ static void printProgramUsage(const char* name) { " -h, --help Display this help and exit.\n"); } -struct CommandHelp { - std::string usage; - std::string short_desc; - std::string long_desc; - CommandHelp() {} - CommandHelp(const char* u, const char* s, const char* l) : usage(u), short_desc(s), long_desc(l) {} -}; - -std::map helpMap; -std::set hiddenCommands; - #define ESCAPINGK "\n\nFor 
information on escaping keys, type `help escaping'." #define ESCAPINGKV "\n\nFor information on escaping keys and values, type `help escaping'." +using namespace fdb_cli; +std::map& helpMap = CommandFactory::commands(); +std::set& hiddenCommands = CommandFactory::hiddenCommands(); + void initHelp() { helpMap["begin"] = CommandHelp("begin", @@ -650,11 +654,6 @@ void initHelp() { "SECONDS have elapsed, or after a storage server with a different ZONEID fails. Only one ZONEID can be marked " "for maintenance. Calling this command with no arguments will display any ongoing maintenance. Calling this " "command with `off' will disable maintenance.\n"); - helpMap["consistencycheck"] = CommandHelp( - "consistencycheck [on|off]", - "permits or prevents consistency checking", - "Calling this command with `on' permits consistency check processes to run and `off' will halt their checking. " - "Calling this command with no arguments will display if consistency checking is currently allowed.\n"); helpMap["throttle"] = CommandHelp("throttle [ARGS]", "view and control throttled tags", @@ -721,14 +720,6 @@ void printHelp(StringRef command) { printf("I don't know anything about `%s'\n", formatStringRef(command).c_str()); } -void printUsage(StringRef command) { - auto i = helpMap.find(command.toString()); - if (i != helpMap.end()) - printf("Usage: %s\n", i->second.usage.c_str()); - else - fprintf(stderr, "ERROR: Unknown command `%s'\n", command.toString().c_str()); -} - std::string getCoordinatorsInfoString(StatusObjectReader statusObj) { std::string outputString; try { @@ -2671,6 +2662,27 @@ Reference getTransaction(Database db, return tr; } +// TODO: Update the function to get rid of Database and ReadYourWritesTransaction after refactoring +// The original ReadYourWritesTransaciton handle "tr" is needed as some commands can be called inside a +// transaction and "tr" holds the pointer to the ongoing transaction object. 
As it's not easy to get ride of "tr" in +// one shot and we are refactoring the code to use Reference (tr2), we need to let "tr2" point to the same +// underlying transaction like "tr". Thus everytime we need to use "tr2", we first update "tr" and let "tr2" points to +// "tr1". "tr2" is always having the same lifetime as "tr1" +Reference getTransaction(Database db, + Reference& tr, + Reference& tr2, + FdbOptions* options, + bool intrans) { + // Update "tr" to point to a brand new transaction object when it's not initialized or "intrans" flag is "false", + // which indicates we need a new transaction object + if (!tr || !intrans) { + tr = makeReference(db); + options->apply(tr); + } + tr2 = Reference(new ThreadSafeTransaction(tr.getPtr())); + return tr2; +} + std::string newCompletion(const char* base, const char* name) { return format("%s%s ", base, name); } @@ -3142,6 +3154,9 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { state Database db; state Reference tr; + // TODO: refactoring work, will replace db, tr when we have all commands through the general fdb interface + state Reference db2; + state Reference tr2; state bool writeMode = false; @@ -3179,6 +3194,15 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { return 1; } + // Note: refactoring work, will remove the above code finally + try { + db2 = API->createDatabase(opt.clusterFile.c_str()); + } catch (Error& e) { + fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code()); + printf("Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str()); + return 1; + } + if (opt.trace) { TraceEvent("CLIProgramStart") .setMaxEventLength(12000) @@ -3684,7 +3708,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { if (tokencmp(tokens[0], "kill")) { getTransaction(db, tr, options, intrans); if (tokens.size() == 1) { - Standalone kvs = wait( + RangeResult kvs = wait( makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), 
LiteralStringRef("\xff\xff/worker_interfaces0")), CLIENT_KNOBS->TOO_MANY))); @@ -3751,7 +3775,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { if (tokencmp(tokens[0], "suspend")) { getTransaction(db, tr, options, intrans); if (tokens.size() == 1) { - Standalone kvs = wait( + RangeResult kvs = wait( makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")), CLIENT_KNOBS->TOO_MANY))); @@ -3847,29 +3871,9 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { } if (tokencmp(tokens[0], "consistencycheck")) { - getTransaction(db, tr, options, intrans); - tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); - tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); - tr->setOption(FDBTransactionOptions::LOCK_AWARE); - if (tokens.size() == 1) { - state Future>> ccSuspendSettingFuture = - tr->get(fdbShouldConsistencyCheckBeSuspended); - wait(makeInterruptable(success(ccSuspendSettingFuture))); - bool ccSuspendSetting = - ccSuspendSettingFuture.get().present() - ? BinaryReader::fromStringRef(ccSuspendSettingFuture.get().get(), Unversioned()) - : false; - printf("ConsistencyCheck is %s\n", ccSuspendSetting ? 
"off" : "on"); - } else if (tokens.size() == 2 && tokencmp(tokens[1], "off")) { - tr->set(fdbShouldConsistencyCheckBeSuspended, BinaryWriter::toValue(true, Unversioned())); - wait(commitTransaction(tr)); - } else if (tokens.size() == 2 && tokencmp(tokens[1], "on")) { - tr->set(fdbShouldConsistencyCheckBeSuspended, BinaryWriter::toValue(false, Unversioned())); - wait(commitTransaction(tr)); - } else { - printUsage(tokens[0]); - is_error = true; - } + getTransaction(db, tr, tr2, options, intrans); + bool _result = wait(consistencyCheckCommandActor(tr2, tokens)); + is_error = !_result; continue; } @@ -3962,7 +3966,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { continue; } getTransaction(db, tr, options, intrans); - Standalone kvs = wait( + RangeResult kvs = wait( makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")), CLIENT_KNOBS->TOO_MANY))); @@ -3991,7 +3995,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { continue; } getTransaction(db, tr, options, intrans); - Standalone kvs = wait(makeInterruptable( + RangeResult kvs = wait(makeInterruptable( tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")), CLIENT_KNOBS->TOO_MANY))); @@ -4070,7 +4074,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { continue; } getTransaction(db, tr, options, intrans); - Standalone kvs = wait( + RangeResult kvs = wait( makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")), CLIENT_KNOBS->TOO_MANY))); @@ -4112,7 +4116,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { if (tokencmp(tokens[0], "expensive_data_check")) { getTransaction(db, tr, options, intrans); if (tokens.size() == 1) { - Standalone kvs = wait( + RangeResult kvs = wait( 
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")), CLIENT_KNOBS->TOO_MANY))); @@ -4228,7 +4232,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { endKey = strinc(tokens[1]); } - Standalone kvs = wait(makeInterruptable( + RangeResult kvs = wait(makeInterruptable( getTransaction(db, tr, options, intrans)->getRange(KeyRangeRef(tokens[1], endKey), limit))); printf("\nRange limited to %d keys\n", limit); @@ -4956,7 +4960,9 @@ int main(int argc, char** argv) { } try { - setupNetwork(); + // Note: refactoring fdbcli, in progress + API->selectApiVersion(FDB_API_VERSION); + API->setupNetwork(); Future cliFuture = runCli(opt); Future timeoutFuture = opt.exit_timeout ? timeExit(opt.exit_timeout) : Never(); auto f = stopNetworkAfter(success(cliFuture) || timeoutFuture); diff --git a/fdbcli/fdbcli.actor.h b/fdbcli/fdbcli.actor.h new file mode 100644 index 0000000000..ceae1263c2 --- /dev/null +++ b/fdbcli/fdbcli.actor.h @@ -0,0 +1,78 @@ +/* + * fdbcli.actor.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +// When actually compiled (NO_INTELLISENSE), include the generated +// version of this file. In intellisense use the source version. 
+#if defined(NO_INTELLISENSE) && !defined(FDBCLI_FDBCLI_ACTOR_G_H) +#define FDBCLI_FDBCLI_ACTOR_G_H +#include "fdbcli/fdbcli.actor.g.h" +#elif !defined(FDBCLI_FDBCLI_ACTOR_H) +#define FDBCLI_FDBCLI_ACTOR_H + +#include "fdbclient/IClientApi.h" +#include "flow/Arena.h" + +#include "flow/actorcompiler.h" // This must be the last #include. + +namespace fdb_cli { + +struct CommandHelp { + std::string usage; + std::string short_desc; + std::string long_desc; + CommandHelp() {} + CommandHelp(const char* u, const char* s, const char* l) : usage(u), short_desc(s), long_desc(l) {} +}; + +struct CommandFactory { + CommandFactory(const char* name, CommandHelp help) { commands()[name] = help; } + CommandFactory(const char* name) { hiddenCommands().insert(name); } + static std::map& commands() { + static std::map helpMap; + return helpMap; + } + static std::set& hiddenCommands() { + static std::set commands; + return commands; + } +}; + +// Special keys used by fdbcli commands + +// consistencycheck +extern const KeyRef consistencyCheckSpecialKey; + +// help functions (Copied from fdbcli.actor.cpp) + +// compare StringRef with the given c string +bool tokencmp(StringRef token, const char* command); +// print the usage of the specified command +void printUsage(StringRef command); + +// All fdbcli commands (alphabetically) +// consistency command +ACTOR Future consistencyCheckCommandActor(Reference tr, std::vector tokens); + +} // namespace fdb_cli + +#include "flow/unactorcompiler.h" +#endif diff --git a/fdbclient/BackupAgent.actor.h b/fdbclient/BackupAgent.actor.h index 1e3f41bb0e..c8903b9fe4 100644 --- a/fdbclient/BackupAgent.actor.h +++ b/fdbclient/BackupAgent.actor.h @@ -593,10 +593,10 @@ public: Reference futureBucket; }; -typedef std::pair, Version> RangeResultWithVersion; +using RangeResultWithVersion = std::pair; struct RCGroup { - Standalone items; + RangeResult items; Version version; uint64_t groupKey; diff --git a/fdbclient/BackupAgentBase.actor.cpp 
b/fdbclient/BackupAgentBase.actor.cpp index 72fce5a509..fba2e69954 100644 --- a/fdbclient/BackupAgentBase.actor.cpp +++ b/fdbclient/BackupAgentBase.actor.cpp @@ -401,7 +401,7 @@ ACTOR Future readCommitted(Database cx, releaser = FlowLock::Releaser( *lock, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT); - state Standalone values = wait(tr.getRange(begin, end, limits)); + state RangeResult values = wait(tr.getRange(begin, end, limits)); // When this buggify line is enabled, if there are more than 1 result then use half of the results if (values.size() > 1 && BUGGIFY) { @@ -467,7 +467,7 @@ ACTOR Future readCommitted(Database cx, if (lockAware) tr.setOption(FDBTransactionOptions::LOCK_AWARE); - state Standalone rangevalue = wait(tr.getRange(nextKey, end, limits)); + state RangeResult rangevalue = wait(tr.getRange(nextKey, end, limits)); // When this buggify line is enabled, if there are more than 1 result then use half of the results if (rangevalue.size() > 1 && BUGGIFY) { @@ -778,7 +778,7 @@ ACTOR static Future _eraseLogData(Reference tr, return Void(); } - state Standalone backupVersions = wait( + state RangeResult backupVersions = wait( tr->getRange(KeyRangeRef(backupLatestVersionsPath, strinc(backupLatestVersionsPath)), CLIENT_KNOBS->TOO_MANY)); // Make sure version history key does exist and lower the beginVersion if needed @@ -870,7 +870,7 @@ ACTOR static Future _eraseLogData(Reference tr, } if (!endVersion.present() && backupVersions.size() == 1) { - Standalone existingDestUidValues = + RangeResult existingDestUidValues = wait(tr->getRange(KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY)); for (auto it : existingDestUidValues) { if (it.value == destUidValue) { @@ -903,7 +903,7 @@ ACTOR Future cleanupLogMutations(Database cx, Value destUidValue, bool del tr->setOption(FDBTransactionOptions::LOCK_AWARE); tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); - state Standalone 
backupVersions = wait(tr->getRange( + state RangeResult backupVersions = wait(tr->getRange( KeyRangeRef(backupLatestVersionsPath, strinc(backupLatestVersionsPath)), CLIENT_KNOBS->TOO_MANY)); state Version readVer = tr->getReadVersion().get(); @@ -990,7 +990,7 @@ ACTOR Future cleanupBackup(Database cx, bool deleteData) { tr->setOption(FDBTransactionOptions::LOCK_AWARE); tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); - state Standalone destUids = wait( + state RangeResult destUids = wait( tr->getRange(KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY)); for (auto destUid : destUids) { diff --git a/fdbclient/DatabaseBackupAgent.actor.cpp b/fdbclient/DatabaseBackupAgent.actor.cpp index 9b8e02a102..20f9c6bcf2 100644 --- a/fdbclient/DatabaseBackupAgent.actor.cpp +++ b/fdbclient/DatabaseBackupAgent.actor.cpp @@ -157,7 +157,7 @@ struct BackupRangeTaskFunc : TaskFuncBase { tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::LOCK_AWARE); state Standalone> results; - Standalone values = wait(tr->getRange( + RangeResult values = wait(tr->getRange( KeyRangeRef(keyAfter(beginKey.withPrefix(keyServersPrefix)), endKey.withPrefix(keyServersPrefix)), limit)); for (auto& s : values) { @@ -314,19 +314,18 @@ struct BackupRangeTaskFunc : TaskFuncBase { applyMutationsKeyVersionMapRange.begin); state Key rangeCountKey = task->params[BackupAgentBase::keyConfigLogUid].withPrefix( applyMutationsKeyVersionCountRange.begin); - state Future> backupVersions = + state Future backupVersions = krmGetRanges(tr, prefix, KeyRangeRef(rangeBegin, rangeEnd), BUGGIFY ? 
2 : 2000, 1e5); state Future> logVersionValue = tr->get( task->params[BackupAgentBase::keyConfigLogUid].withPrefix(applyMutationsEndRange.begin), true); state Future> rangeCountValue = tr->get(rangeCountKey, true); - state Future> prevRange = tr->getRange( + state Future prevRange = tr->getRange( firstGreaterOrEqual(prefix), lastLessOrEqual(rangeBegin.withPrefix(prefix)), 1, true, true); - state Future> nextRange = - tr->getRange(firstGreaterOrEqual(rangeEnd.withPrefix(prefix)), - firstGreaterOrEqual(strinc(prefix)), - 1, - true, - false); + state Future nextRange = tr->getRange(firstGreaterOrEqual(rangeEnd.withPrefix(prefix)), + firstGreaterOrEqual(strinc(prefix)), + 1, + true, + false); state Future verified = taskBucket->keepRunning(tr, task); wait(checkDatabaseLock(tr, @@ -725,7 +724,7 @@ struct CopyLogRangeTaskFunc : TaskFuncBase { state Subspace conf = Subspace(databaseBackupPrefixRange.begin) .get(BackupAgentBase::keyConfig) .get(task->params[BackupAgentBase::keyConfigLogUid]); - state std::vector> nextMutations; + state std::vector nextMutations; state bool isTimeoutOccured = false; state Optional lastKey; state Version lastVersion; @@ -736,9 +735,9 @@ struct CopyLogRangeTaskFunc : TaskFuncBase { return Optional(); } - state std::vector> mutations = std::move(nextMutations); + state std::vector mutations = std::move(nextMutations); state int64_t mutationSize = nextMutationSize; - nextMutations = std::vector>(); + nextMutations = std::vector(); nextMutationSize = 0; if (!endOfStream) { @@ -1470,7 +1469,7 @@ struct OldCopyLogRangeTaskFunc : TaskFuncBase { .get(BackupAgentBase::keyConfig) .get(task->params[BackupAgentBase::keyConfigLogUid]); - state std::vector> nextMutations; + state std::vector nextMutations; state int64_t nextMutationSize = 0; loop { try { @@ -1478,9 +1477,9 @@ struct OldCopyLogRangeTaskFunc : TaskFuncBase { return Void(); } - state std::vector> mutations = std::move(nextMutations); + state std::vector mutations = std::move(nextMutations); 
state int64_t mutationSize = nextMutationSize; - nextMutations = std::vector>(); + nextMutations = std::vector(); nextMutationSize = 0; if (!endOfStream) { @@ -1819,7 +1818,7 @@ struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase { } if (backupRanges.size() == 1) { - Standalone existingDestUidValues = wait(srcTr->getRange( + RangeResult existingDestUidValues = wait(srcTr->getRange( KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY)); bool found = false; for (auto it : existingDestUidValues) { @@ -2063,7 +2062,7 @@ struct StartFullBackupTaskFunc : TaskFuncBase { // Initialize destUid if (backupRanges.size() == 1) { - Standalone existingDestUidValues = wait(srcTr->getRange( + RangeResult existingDestUidValues = wait(srcTr->getRange( KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY)); bool found = false; for (auto it : existingDestUidValues) { @@ -2561,7 +2560,7 @@ public: if (backupAction == DatabaseBackupAgent::PreBackupAction::VERIFY) { // Make sure all of the ranges are empty before we backup into them. - state std::vector>> backupIntoResults; + state std::vector> backupIntoResults; for (auto& backupRange : backupRanges) { backupIntoResults.push_back( tr->getRange(backupRange.removePrefix(removePrefix).withPrefix(addPrefix), 1)); @@ -3060,13 +3059,13 @@ public: tr->setOption(FDBTransactionOptions::LOCK_AWARE); state Future> fPaused = tr->get(backupAgent->taskBucket->getPauseKey()); - state Future> fErrorValues = + state Future fErrorValues = errorLimit > 0 ? 
tr->getRange(backupAgent->errors.get(BinaryWriter::toValue(logUid, Unversioned())).range(), errorLimit, false, true) - : Future>(); + : Future(); state Future> fBackupUid = tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned())) .pack(DatabaseBackupAgent::keyFolderId)); @@ -3141,7 +3140,7 @@ public: // Append the errors, if requested if (errorLimit > 0) { - Standalone values = wait(fErrorValues); + RangeResult values = wait(fErrorValues); // Display the errors, if any if (values.size() > 0) { diff --git a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index b2cd469ab8..e2c8b4cd3b 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -468,11 +468,12 @@ struct Traceable : std::true_type { } }; -typedef Standalone Key; -typedef Standalone Value; -typedef Standalone KeyRange; -typedef Standalone KeyValue; -typedef Standalone KeySelector; +using Key = Standalone; +using Value = Standalone; +using KeyRange = Standalone; +using KeyValue = Standalone; +using KeySelector = Standalone; +using RangeResult = Standalone; enum { invalidVersion = -1, latestVersion = -2, MAX_VERSION = std::numeric_limits::max() }; @@ -706,6 +707,7 @@ struct RangeResultRef : VectorRef { " readToBegin:" + std::to_string(readToBegin) + " readThroughEnd:" + std::to_string(readThroughEnd); } }; +using RangeResult = Standalone; template <> struct Traceable : std::true_type { @@ -866,22 +868,36 @@ struct TLogSpillType { // Contains the amount of free and total space for a storage server, in bytes struct StorageBytes { + // Free space on the filesystem int64_t free; + // Total space on the filesystem int64_t total; - int64_t used; // Used by *this* store, not total-free - int64_t available; // Amount of disk space that can be used by data structure, including free disk space and - // internally reusable space + // Used by *this* store, not total - free + int64_t used; + // Amount of space available for use by the store, which includes free space on the filesystem + // and 
internal free space within the store data that is immediately reusable. + int64_t available; + // Amount of space that could eventually be available for use after garbage collection + int64_t temp; StorageBytes() {} - StorageBytes(int64_t free, int64_t total, int64_t used, int64_t available) - : free(free), total(total), used(used), available(available) {} + StorageBytes(int64_t free, int64_t total, int64_t used, int64_t available, int64_t temp = 0) + : free(free), total(total), used(used), available(available), temp(temp) {} template void serialize(Ar& ar) { serializer(ar, free, total, used, available); } -}; + std::string toString() const { + return format("{%.2f MB total, %.2f MB free, %.2f MB available, %.2f MB used, %.2f MB temp}", + total / 1e6, + free / 1e6, + available / 1e6, + used / 1e6, + temp / 1e6); + } +}; struct LogMessageVersion { // Each message pushed into the log system has a unique, totally ordered LogMessageVersion // See ILogSystem::push() for how these are assigned diff --git a/fdbclient/FileBackupAgent.actor.cpp b/fdbclient/FileBackupAgent.actor.cpp index a09ed25789..dfd33b5b67 100644 --- a/fdbclient/FileBackupAgent.actor.cpp +++ b/fdbclient/FileBackupAgent.actor.cpp @@ -1025,7 +1025,7 @@ ACTOR static Future>> getBlockOfShards(ReferencesetOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::LOCK_AWARE); state Standalone> results; - Standalone values = wait(tr->getRange( + RangeResult values = wait(tr->getRange( KeyRangeRef(keyAfter(beginKey.withPrefix(keyServersPrefix)), endKey.withPrefix(keyServersPrefix)), limit)); for (auto& s : values) { @@ -4584,7 +4584,7 @@ public: state Key destUidValue(BinaryWriter::toValue(uid, Unversioned())); if (normalizedRanges.size() == 1) { - Standalone existingDestUidValues = wait( + RangeResult existingDestUidValues = wait( tr->getRange(KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY)); bool found = false; for (auto it : 
existingDestUidValues) { @@ -4691,7 +4691,7 @@ public: KeyRange restoreIntoRange = KeyRangeRef(restoreRanges[index].begin, restoreRanges[index].end) .removePrefix(removePrefix) .withPrefix(addPrefix); - Standalone existingRows = wait(tr->getRange(restoreIntoRange, 1)); + RangeResult existingRows = wait(tr->getRange(restoreIntoRange, 1)); if (existingRows.size() > 0 && !onlyAppyMutationLogs) { throw restore_destination_not_empty(); } @@ -5741,7 +5741,7 @@ ACTOR static Future writeKVs(Database cx, Standalone readKVs = wait(tr.getRange(KeyRangeRef(k1, k2), CLIENT_KNOBS->TOO_MANY)); + RangeResult readKVs = wait(tr.getRange(KeyRangeRef(k1, k2), CLIENT_KNOBS->TOO_MANY)); ASSERT(readKVs.size() > 0 || begin == end); break; } catch (Error& e) { @@ -5773,7 +5773,7 @@ ACTOR static Future transformDatabaseContents(Database cx, tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr.setOption(FDBTransactionOptions::LOCK_AWARE); for (i = 0; i < restoreRanges.size(); ++i) { - Standalone kvs = wait(tr.getRange(restoreRanges[i], CLIENT_KNOBS->TOO_MANY)); + RangeResult kvs = wait(tr.getRange(restoreRanges[i], CLIENT_KNOBS->TOO_MANY)); ASSERT(!kvs.more); for (auto kv : kvs) { oldData.push_back_deep(oldData.arena(), KeyValueRef(kv.key, kv.value)); @@ -5840,7 +5840,7 @@ ACTOR static Future transformDatabaseContents(Database cx, try { tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr.setOption(FDBTransactionOptions::LOCK_AWARE); - Standalone emptyData = wait(tr.getRange(normalKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult emptyData = wait(tr.getRange(normalKeys, CLIENT_KNOBS->TOO_MANY)); for (int i = 0; i < emptyData.size(); ++i) { TraceEvent(SevError, "ExpectEmptyData") .detail("Index", i) @@ -5878,7 +5878,7 @@ ACTOR static Future transformDatabaseContents(Database cx, try { tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr.setOption(FDBTransactionOptions::LOCK_AWARE); - Standalone allData = wait(tr.getRange(normalKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult 
allData = wait(tr.getRange(normalKeys, CLIENT_KNOBS->TOO_MANY)); TraceEvent(SevFRTestInfo, "SanityCheckData").detail("Size", allData.size()); for (int i = 0; i < allData.size(); ++i) { std::pair backupRestoreValid = insideValidRange(allData[i], restoreRanges, backupRanges); diff --git a/fdbclient/GlobalConfig.actor.cpp b/fdbclient/GlobalConfig.actor.cpp index 58e032d363..5fa901df0e 100644 --- a/fdbclient/GlobalConfig.actor.cpp +++ b/fdbclient/GlobalConfig.actor.cpp @@ -166,7 +166,7 @@ ACTOR Future GlobalConfig::refresh(GlobalConfig* self) { self->data.clear(); Transaction tr(self->cx); - Standalone result = wait(tr.getRange(globalConfigDataKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult result = wait(tr.getRange(globalConfigDataKeys, CLIENT_KNOBS->TOO_MANY)); for (const auto& kv : result) { KeyRef systemKey = kv.key.removePrefix(globalConfigKeysPrefix); self->insert(systemKey, kv.value); diff --git a/fdbclient/IClientApi.h b/fdbclient/IClientApi.h index 45249f1509..3017a1f3f3 100644 --- a/fdbclient/IClientApi.h +++ b/fdbclient/IClientApi.h @@ -20,7 +20,6 @@ #ifndef FDBCLIENT_ICLIENTAPI_H #define FDBCLIENT_ICLIENTAPI_H -#include "fdbclient/ManagementAPI.actor.h" #pragma once #include "fdbclient/FDBOptions.g.h" @@ -42,24 +41,24 @@ public: // until the ThreadFuture's ThreadSingleAssignmentVar has its memory released or it is destroyed. 
virtual ThreadFuture> get(const KeyRef& key, bool snapshot = false) = 0; virtual ThreadFuture getKey(const KeySelectorRef& key, bool snapshot = false) = 0; - virtual ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot = false, - bool reverse = false) = 0; - virtual ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) = 0; - virtual ThreadFuture> getRange(const KeyRangeRef& keys, - int limit, - bool snapshot = false, - bool reverse = false) = 0; - virtual ThreadFuture> getRange(const KeyRangeRef& keys, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) = 0; + virtual ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot = false, + bool reverse = false) = 0; + virtual ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) = 0; + virtual ThreadFuture getRange(const KeyRangeRef& keys, + int limit, + bool snapshot = false, + bool reverse = false) = 0; + virtual ThreadFuture getRange(const KeyRangeRef& keys, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) = 0; virtual ThreadFuture>> getAddressesForKey(const KeyRef& key) = 0; virtual ThreadFuture> getVersionstamp() = 0; diff --git a/fdbclient/KeyBackedTypes.h b/fdbclient/KeyBackedTypes.h index dd6623e4ef..f92324e4ab 100644 --- a/fdbclient/KeyBackedTypes.h +++ b/fdbclient/KeyBackedTypes.h @@ -280,7 +280,7 @@ public: return map( tr->getRange( KeyRangeRef(s.pack(Codec::pack(begin)), endKey), GetRangeLimits(limit), snapshot, reverse), - [s](Standalone const& kvs) -> PairsType { + [s](RangeResult const& kvs) -> PairsType { PairsType results; for (int i = 0; i < kvs.size(); ++i) { KeyType key = Codec::unpack(s.unpack(kvs[i].key)); @@ -344,7 +344,7 @@ public: Key endKey = 
end.present() ? s.pack(Codec::pack(end.get())) : space.range().end; return map( tr->getRange(KeyRangeRef(s.pack(Codec::pack(begin)), endKey), GetRangeLimits(limit), snapshot), - [s](Standalone const& kvs) -> Values { + [s](RangeResult const& kvs) -> Values { Values results; for (int i = 0; i < kvs.size(); ++i) { results.push_back(Codec::unpack(s.unpack(kvs[i].key))); diff --git a/fdbclient/KeyRangeMap.actor.cpp b/fdbclient/KeyRangeMap.actor.cpp index 67992e3e95..7b7dcdf1e3 100644 --- a/fdbclient/KeyRangeMap.actor.cpp +++ b/fdbclient/KeyRangeMap.actor.cpp @@ -35,7 +35,7 @@ void KeyRangeActorMap::getRangesAffectedByInsertion(const KeyRangeRef& keys, vec affectedRanges.push_back(KeyRangeRef(keys.end, e.end())); } -Standalone krmDecodeRanges(KeyRef mapPrefix, KeyRange keys, Standalone kv) { +RangeResult krmDecodeRanges(KeyRef mapPrefix, KeyRange keys, RangeResult kv) { ASSERT(!kv.more || kv.size() > 1); KeyRange withPrefix = KeyRangeRef(mapPrefix.toString() + keys.begin.toString(), mapPrefix.toString() + keys.end.toString()); @@ -46,7 +46,7 @@ Standalone krmDecodeRanges(KeyRef mapPrefix, KeyRange keys, Stan if (kv.size() && kv.end()[-1].key.startsWith(mapPrefix)) endValue = kv.end()[-1].value; - Standalone result; + RangeResult result; result.arena().dependsOn(kv.arena()); result.arena().dependsOn(keys.arena()); @@ -67,34 +67,28 @@ Standalone krmDecodeRanges(KeyRef mapPrefix, KeyRange keys, Stan } // Returns keys.begin, all transitional points in keys, and keys.end, and their values -ACTOR Future> krmGetRanges(Transaction* tr, - Key mapPrefix, - KeyRange keys, - int limit, - int limitBytes) { +ACTOR Future krmGetRanges(Transaction* tr, Key mapPrefix, KeyRange keys, int limit, int limitBytes) { KeyRange withPrefix = KeyRangeRef(mapPrefix.toString() + keys.begin.toString(), mapPrefix.toString() + keys.end.toString()); state GetRangeLimits limits(limit, limitBytes); limits.minRows = 2; - Standalone kv = - wait(tr->getRange(lastLessOrEqual(withPrefix.begin), 
firstGreaterThan(withPrefix.end), limits)); + RangeResult kv = wait(tr->getRange(lastLessOrEqual(withPrefix.begin), firstGreaterThan(withPrefix.end), limits)); return krmDecodeRanges(mapPrefix, keys, kv); } -ACTOR Future> krmGetRanges(Reference tr, - Key mapPrefix, - KeyRange keys, - int limit, - int limitBytes) { +ACTOR Future krmGetRanges(Reference tr, + Key mapPrefix, + KeyRange keys, + int limit, + int limitBytes) { KeyRange withPrefix = KeyRangeRef(mapPrefix.toString() + keys.begin.toString(), mapPrefix.toString() + keys.end.toString()); state GetRangeLimits limits(limit, limitBytes); limits.minRows = 2; - Standalone kv = - wait(tr->getRange(lastLessOrEqual(withPrefix.begin), firstGreaterThan(withPrefix.end), limits)); + RangeResult kv = wait(tr->getRange(lastLessOrEqual(withPrefix.begin), firstGreaterThan(withPrefix.end), limits)); return krmDecodeRanges(mapPrefix, keys, kv); } @@ -125,8 +119,7 @@ void krmSetPreviouslyEmptyRange(CommitTransactionRef& tr, ACTOR Future krmSetRange(Transaction* tr, Key mapPrefix, KeyRange range, Value value) { state KeyRange withPrefix = KeyRangeRef(mapPrefix.toString() + range.begin.toString(), mapPrefix.toString() + range.end.toString()); - Standalone old = - wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, true)); + RangeResult old = wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, true)); Value oldValue; bool hasResult = old.size() > 0 && old[0].key.startsWith(mapPrefix); @@ -147,8 +140,7 @@ ACTOR Future krmSetRange(Transaction* tr, Key mapPrefix, KeyRange range, V ACTOR Future krmSetRange(Reference tr, Key mapPrefix, KeyRange range, Value value) { state KeyRange withPrefix = KeyRangeRef(mapPrefix.toString() + range.begin.toString(), mapPrefix.toString() + range.end.toString()); - Standalone old = - wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, true)); + RangeResult old = 
wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, true)); Value oldValue; bool hasResult = old.size() > 0 && old[0].key.startsWith(mapPrefix); @@ -182,7 +174,7 @@ static Future krmSetRangeCoalescing_(Transaction* tr, state KeyRange maxWithPrefix = KeyRangeRef(mapPrefix.toString() + maxRange.begin.toString(), mapPrefix.toString() + maxRange.end.toString()); - state vector>> keys; + state vector> keys; keys.push_back(tr->getRange(lastLessThan(withPrefix.begin), firstGreaterOrEqual(withPrefix.begin), 1, true)); keys.push_back(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end) + 1, 2, true)); wait(waitForAll(keys)); diff --git a/fdbclient/KeyRangeMap.h b/fdbclient/KeyRangeMap.h index 38a340c77b..7016dcfc4d 100644 --- a/fdbclient/KeyRangeMap.h +++ b/fdbclient/KeyRangeMap.h @@ -126,16 +126,16 @@ private: // krm*(): KeyRangeMap-like abstraction stored in the database, accessed through Transactions class Transaction; class ReadYourWritesTransaction; -Future> krmGetRanges(Transaction* const& tr, - Key const& mapPrefix, - KeyRange const& keys, - int const& limit = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT, - int const& limitBytes = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT_BYTES); -Future> krmGetRanges(Reference const& tr, - Key const& mapPrefix, - KeyRange const& keys, - int const& limit = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT, - int const& limitBytes = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT_BYTES); +Future krmGetRanges(Transaction* const& tr, + Key const& mapPrefix, + KeyRange const& keys, + int const& limit = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT, + int const& limitBytes = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT_BYTES); +Future krmGetRanges(Reference const& tr, + Key const& mapPrefix, + KeyRange const& keys, + int const& limit = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT, + int const& limitBytes = CLIENT_KNOBS->KRM_GET_RANGE_LIMIT_BYTES); void krmSetPreviouslyEmptyRange(Transaction* tr, const KeyRef& mapPrefix, const KeyRangeRef& keys, @@ -162,7 
+162,7 @@ Future krmSetRangeCoalescing(Reference const& t KeyRange const& range, KeyRange const& maxRange, Value const& value); -Standalone krmDecodeRanges(KeyRef mapPrefix, KeyRange keys, Standalone kv); +RangeResult krmDecodeRanges(KeyRef mapPrefix, KeyRange keys, RangeResult kv); template std::vector> KeyRangeMap::getAffectedRangesAfterInsertion( diff --git a/fdbclient/Knobs.cpp b/fdbclient/Knobs.cpp index bcca5ed166..5186ca0c5c 100644 --- a/fdbclient/Knobs.cpp +++ b/fdbclient/Knobs.cpp @@ -50,6 +50,7 @@ void ClientKnobs::initialize(bool randomize) { init( RECOVERY_DELAY_SECONDS_PER_GENERATION, 60.0 ); init( MAX_GENERATIONS, 100 ); init( MAX_GENERATIONS_OVERRIDE, 0 ); + init( MAX_GENERATIONS_SIM, 50 ); //Disable network connections after this many generations in simulation, should be less than RECOVERY_DELAY_START_GENERATION init( COORDINATOR_RECONNECTION_DELAY, 1.0 ); init( CLIENT_EXAMPLE_AMOUNT, 20 ); diff --git a/fdbclient/Knobs.h b/fdbclient/Knobs.h index 3d22b5a24b..1c21d34dca 100644 --- a/fdbclient/Knobs.h +++ b/fdbclient/Knobs.h @@ -42,6 +42,7 @@ public: double RECOVERY_DELAY_SECONDS_PER_GENERATION; double MAX_GENERATIONS; double MAX_GENERATIONS_OVERRIDE; + double MAX_GENERATIONS_SIM; double COORDINATOR_RECONNECTION_DELAY; int CLIENT_EXAMPLE_AMOUNT; diff --git a/fdbclient/ManagementAPI.actor.cpp b/fdbclient/ManagementAPI.actor.cpp index 05e1ec95e2..90d670e801 100644 --- a/fdbclient/ManagementAPI.actor.cpp +++ b/fdbclient/ManagementAPI.actor.cpp @@ -357,7 +357,7 @@ ACTOR Future getDatabaseConfiguration(Database cx) { loop { try { tr.setOption(FDBTransactionOptions::LOCK_AWARE); - Standalone res = wait(tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult res = wait(tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(res.size() < CLIENT_KNOBS->TOO_MANY); DatabaseConfiguration config; config.fromKeyValues((VectorRef)res); @@ -407,7 +407,7 @@ ACTOR Future changeConfig(Database cx, std::map> fConfig = tr.getRange(configKeys, 
CLIENT_KNOBS->TOO_MANY); + state Future fConfig = tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY); state Future> fWorkers = getWorkers(&tr); wait(success(fConfig) || tooLong); @@ -458,19 +458,19 @@ ACTOR Future changeConfig(Database cx, std::map> fServerList = - (newConfig.regions.size()) ? tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY) - : Future>(); + state Future fServerList = (newConfig.regions.size()) + ? tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY) + : Future(); if (newConfig.usableRegions == 2) { if (oldReplicationUsesDcId) { - state Future> fLocalityList = + state Future fLocalityList = tr.getRange(tagLocalityListKeys, CLIENT_KNOBS->TOO_MANY); wait(success(fLocalityList) || tooLong); if (!fLocalityList.isReady()) { return ConfigurationResult::DATABASE_UNAVAILABLE; } - Standalone localityList = fLocalityList.get(); + RangeResult localityList = fLocalityList.get(); ASSERT(!localityList.more && localityList.size() < CLIENT_KNOBS->TOO_MANY); std::set localityDcIds; @@ -513,7 +513,7 @@ ACTOR Future changeConfig(Database cx, std::map serverList = fServerList.get(); + RangeResult serverList = fServerList.get(); ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY); std::set newDcIds; @@ -988,8 +988,8 @@ Future changeConfig(Database const& cx, std::string const& } ACTOR Future> getWorkers(Transaction* tr) { - state Future> processClasses = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY); - state Future> processData = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY); + state Future processClasses = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY); + state Future processData = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY); wait(success(processClasses) && success(processData)); ASSERT(!processClasses.get().more && processClasses.get().size() < CLIENT_KNOBS->TOO_MANY); @@ -1679,9 +1679,9 @@ ACTOR Future setClass(Database cx, AddressExclusion server, ProcessClass p } ACTOR Future> getExcludedServers(Transaction* tr) { 
- state Standalone r = wait(tr->getRange(excludedServersKeys, CLIENT_KNOBS->TOO_MANY)); + state RangeResult r = wait(tr->getRange(excludedServersKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY); - state Standalone r2 = wait(tr->getRange(failedServersKeys, CLIENT_KNOBS->TOO_MANY)); + state RangeResult r2 = wait(tr->getRange(failedServersKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY); vector exclusions; @@ -1867,7 +1867,7 @@ ACTOR Future checkForExcludingServersTxActor(ReadYourWritesTransaction* tr // recovery // Check that there aren't any storage servers with addresses violating the exclusions - Standalone serverList = wait(tr->getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult serverList = wait(tr->getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY); state bool ok = true; @@ -1948,7 +1948,7 @@ ACTOR Future waitForFullReplication(Database cx) { tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); tr.setOption(FDBTransactionOptions::LOCK_AWARE); - Standalone confResults = wait(tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult confResults = wait(tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!confResults.more && confResults.size() < CLIENT_KNOBS->TOO_MANY); state DatabaseConfiguration config; config.fromKeyValues((VectorRef)confResults); @@ -2203,8 +2203,7 @@ ACTOR Future changeCachedRange(Database cx, KeyRangeRef range, bool add) { tr.clear(sysRangeClear); tr.clear(privateRange); tr.addReadConflictRange(privateRange); - Standalone previous = - wait(tr.getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, true)); + RangeResult previous = wait(tr.getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, true)); bool prevIsCached = false; if (!previous.empty()) { std::vector prevVal; @@ -2220,8 +2219,7 @@ ACTOR Future changeCachedRange(Database cx, KeyRangeRef range, 
bool add) { tr.set(sysRange.begin, trueValue); tr.set(privateRange.begin, serverKeysTrue); } - Standalone after = - wait(tr.getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, false)); + RangeResult after = wait(tr.getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, false)); bool afterIsCached = false; if (!after.empty()) { std::vector afterVal; diff --git a/fdbclient/MonitorLeader.actor.cpp b/fdbclient/MonitorLeader.actor.cpp index 3f48df1714..057e546501 100644 --- a/fdbclient/MonitorLeader.actor.cpp +++ b/fdbclient/MonitorLeader.actor.cpp @@ -496,7 +496,7 @@ ACTOR Future monitorLeaderOneGeneration(ReferencegetConnectionString().toString()); + .detail("OldConnStr", info.intermediateConnFile->getConnectionString().toString()).trackLatest("MonitorLeaderForwarding"); info.intermediateConnFile = makeReference( connFile->getFilename(), ClusterConnectionString(leader.get().first.serializedInfo.toString())); return info; diff --git a/fdbclient/MultiVersionTransaction.actor.cpp b/fdbclient/MultiVersionTransaction.actor.cpp index bb4049d859..18f7bc71e8 100644 --- a/fdbclient/MultiVersionTransaction.actor.cpp +++ b/fdbclient/MultiVersionTransaction.actor.cpp @@ -89,19 +89,19 @@ ThreadFuture DLTransaction::getKey(const KeySelectorRef& key, bool snapshot }); } -ThreadFuture> DLTransaction::getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot, - bool reverse) { +ThreadFuture DLTransaction::getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot, + bool reverse) { return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse); } -ThreadFuture> DLTransaction::getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +ThreadFuture DLTransaction::getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot, + bool reverse) { FdbCApi::FDBFuture* f = 
api->transactionGetRange(tr, begin.getKey().begin(), begin.getKey().size(), @@ -117,7 +117,7 @@ ThreadFuture> DLTransaction::getRange(const KeySelect 0, snapshot, reverse); - return toThreadFuture>(api, f, [](FdbCApi::FDBFuture* f, FdbCApi* api) { + return toThreadFuture(api, f, [](FdbCApi::FDBFuture* f, FdbCApi* api) { const FdbCApi::FDBKeyValue* kvs; int count; FdbCApi::fdb_bool_t more; @@ -125,23 +125,19 @@ ThreadFuture> DLTransaction::getRange(const KeySelect ASSERT(!error); // The memory for this is stored in the FDBFuture and is released when the future gets destroyed - return Standalone(RangeResultRef(VectorRef((KeyValueRef*)kvs, count), more), - Arena()); + return RangeResult(RangeResultRef(VectorRef((KeyValueRef*)kvs, count), more), Arena()); }); } -ThreadFuture> DLTransaction::getRange(const KeyRangeRef& keys, - int limit, - bool snapshot, - bool reverse) { +ThreadFuture DLTransaction::getRange(const KeyRangeRef& keys, int limit, bool snapshot, bool reverse) { return getRange( firstGreaterOrEqual(keys.begin), firstGreaterOrEqual(keys.end), GetRangeLimits(limit), snapshot, reverse); } -ThreadFuture> DLTransaction::getRange(const KeyRangeRef& keys, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +ThreadFuture DLTransaction::getRange(const KeyRangeRef& keys, + GetRangeLimits limits, + bool snapshot, + bool reverse) { return getRange(firstGreaterOrEqual(keys.begin), firstGreaterOrEqual(keys.end), limits, snapshot, reverse); } @@ -685,45 +681,45 @@ ThreadFuture MultiVersionTransaction::getKey(const KeySelectorRef& key, boo return abortableFuture(f, tr.onChange); } -ThreadFuture> MultiVersionTransaction::getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot, - bool reverse) { +ThreadFuture MultiVersionTransaction::getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot, + bool reverse) { auto tr = getTransaction(); auto f = tr.transaction ? 
tr.transaction->getRange(begin, end, limit, snapshot, reverse) - : ThreadFuture>(Never()); + : ThreadFuture(Never()); return abortableFuture(f, tr.onChange); } -ThreadFuture> MultiVersionTransaction::getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +ThreadFuture MultiVersionTransaction::getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot, + bool reverse) { auto tr = getTransaction(); auto f = tr.transaction ? tr.transaction->getRange(begin, end, limits, snapshot, reverse) - : ThreadFuture>(Never()); + : ThreadFuture(Never()); return abortableFuture(f, tr.onChange); } -ThreadFuture> MultiVersionTransaction::getRange(const KeyRangeRef& keys, - int limit, - bool snapshot, - bool reverse) { +ThreadFuture MultiVersionTransaction::getRange(const KeyRangeRef& keys, + int limit, + bool snapshot, + bool reverse) { auto tr = getTransaction(); - auto f = tr.transaction ? tr.transaction->getRange(keys, limit, snapshot, reverse) - : ThreadFuture>(Never()); + auto f = + tr.transaction ? tr.transaction->getRange(keys, limit, snapshot, reverse) : ThreadFuture(Never()); return abortableFuture(f, tr.onChange); } -ThreadFuture> MultiVersionTransaction::getRange(const KeyRangeRef& keys, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +ThreadFuture MultiVersionTransaction::getRange(const KeyRangeRef& keys, + GetRangeLimits limits, + bool snapshot, + bool reverse) { auto tr = getTransaction(); - auto f = tr.transaction ? tr.transaction->getRange(keys, limits, snapshot, reverse) - : ThreadFuture>(Never()); + auto f = + tr.transaction ? 
tr.transaction->getRange(keys, limits, snapshot, reverse) : ThreadFuture(Never()); return abortableFuture(f, tr.onChange); } @@ -913,7 +909,9 @@ MultiVersionDatabase::MultiVersionDatabase(MultiVersionApi* api, } }); - onMainThreadVoid([this]() { dbState->protocolVersionMonitor = dbState->monitorProtocolVersion(); }, nullptr); + Reference dbStateRef = dbState; + onMainThreadVoid([dbStateRef]() { dbStateRef->protocolVersionMonitor = dbStateRef->monitorProtocolVersion(); }, + nullptr); } } @@ -1021,7 +1019,7 @@ void MultiVersionDatabase::DatabaseState::addClient(Reference client .detail("LibPath", client->libPath) .detail("ProtocolVersion", client->protocolVersion); - legacyVersionMonitors.emplace_back(client); + legacyVersionMonitors.emplace_back(new LegacyVersionMonitor(client)); } } @@ -1033,16 +1031,22 @@ ThreadFuture MultiVersionDatabase::DatabaseState::monitorProtocolVersion() Optional expected = dbProtocolVersion; ThreadFuture f = versionMonitorDb->getServerProtocol(dbProtocolVersion); - return mapThreadFuture(f, [this, expected](ErrorOr cv) { + Reference self = Reference::addRef(this); + return mapThreadFuture(f, [self, expected](ErrorOr cv) { if (cv.isError()) { + if (cv.getError().code() == error_code_operation_cancelled) { + return ErrorOr(cv.getError()); + } + TraceEvent("ErrorGettingClusterProtocolVersion") .detail("ExpectedProtocolVersion", expected) .error(cv.getError()); } - ProtocolVersion clusterVersion = !cv.isError() ? cv.get() : dbProtocolVersion.orDefault(currentProtocolVersion); - onMainThreadVoid([this, clusterVersion]() { protocolVersionChanged(clusterVersion); }, nullptr); - return Void(); + ProtocolVersion clusterVersion = + !cv.isError() ? 
cv.get() : self->dbProtocolVersion.orDefault(currentProtocolVersion); + onMainThreadVoid([self, clusterVersion]() { self->protocolVersionChanged(clusterVersion); }, nullptr); + return ErrorOr(Void()); }); } @@ -1053,6 +1057,9 @@ void MultiVersionDatabase::DatabaseState::protocolVersionChanged(ProtocolVersion if (dbProtocolVersion.present() && protocolVersion.normalizedVersion() == dbProtocolVersion.get().normalizedVersion()) { dbProtocolVersion = protocolVersion; + + ASSERT(protocolVersionMonitor.isValid()); + protocolVersionMonitor.cancel(); protocolVersionMonitor = monitorProtocolVersion(); } @@ -1076,12 +1083,13 @@ void MultiVersionDatabase::DatabaseState::protocolVersionChanged(ProtocolVersion if (client->external && !MultiVersionApi::apiVersionAtLeast(610)) { // Old API versions return a future when creating the database, so we need to wait for it + Reference self = Reference::addRef(this); dbReady = mapThreadFuture( - newDb.castTo()->onReady(), [this, newDb, client](ErrorOr ready) { + newDb.castTo()->onReady(), [self, newDb, client](ErrorOr ready) { if (!ready.isError()) { - onMainThreadVoid([this, newDb, client]() { updateDatabase(newDb, client); }, nullptr); + onMainThreadVoid([self, newDb, client]() { self->updateDatabase(newDb, client); }, nullptr); } else { - onMainThreadVoid([this, client]() { updateDatabase(Reference(), client); }, + onMainThreadVoid([self, client]() { self->updateDatabase(Reference(), client); }, nullptr); } @@ -1139,6 +1147,9 @@ void MultiVersionDatabase::DatabaseState::updateDatabase(Reference ne } dbVar->set(db); + + ASSERT(protocolVersionMonitor.isValid()); + protocolVersionMonitor.cancel(); protocolVersionMonitor = monitorProtocolVersion(); } @@ -1146,20 +1157,32 @@ void MultiVersionDatabase::DatabaseState::updateDatabase(Reference ne // Must be called from the main thread void MultiVersionDatabase::DatabaseState::startLegacyVersionMonitors() { for (auto itr = legacyVersionMonitors.begin(); itr != legacyVersionMonitors.end(); 
++itr) { - while (itr != legacyVersionMonitors.end() && itr->client->failed) { + while (itr != legacyVersionMonitors.end() && (*itr)->client->failed) { + (*itr)->close(); itr = legacyVersionMonitors.erase(itr); } if (itr != legacyVersionMonitors.end() && - (!dbProtocolVersion.present() || itr->client->protocolVersion != dbProtocolVersion.get())) { - itr->startConnectionMonitor(Reference::addRef(this)); + (!dbProtocolVersion.present() || (*itr)->client->protocolVersion != dbProtocolVersion.get())) { + (*itr)->startConnectionMonitor(Reference::addRef(this)); } } } // Cleans up state for the legacy version monitors to break reference cycles -// Must be called from the main thread void MultiVersionDatabase::DatabaseState::close() { - legacyVersionMonitors.clear(); + Reference self = Reference::addRef(this); + onMainThreadVoid( + [self]() { + if (self->protocolVersionMonitor.isValid()) { + self->protocolVersionMonitor.cancel(); + } + for (auto monitor : self->legacyVersionMonitors) { + monitor->close(); + } + + self->legacyVersionMonitors.clear(); + }, + nullptr); } // Starts the connection monitor by creating a database object at an old version. 
@@ -1176,19 +1199,22 @@ void MultiVersionDatabase::LegacyVersionMonitor::startConnectionMonitor( tr = Reference(); TraceEvent("StartingLegacyVersionMonitor").detail("ProtocolVersion", client->protocolVersion); + Reference self = Reference::addRef(this); versionMonitor = - mapThreadFuture(db.castTo()->onReady(), [this, dbState](ErrorOr ready) { + mapThreadFuture(db.castTo()->onReady(), [self, dbState](ErrorOr ready) { onMainThreadVoid( - [this, ready, dbState]() { + [self, ready, dbState]() { if (ready.isError()) { - TraceEvent(SevError, "FailedToOpenDatabaseOnClient") - .error(ready.getError()) - .detail("LibPath", client->libPath); + if (ready.getError().code() != error_code_operation_cancelled) { + TraceEvent(SevError, "FailedToOpenDatabaseOnClient") + .error(ready.getError()) + .detail("LibPath", self->client->libPath); - client->failed = true; - MultiVersionApi::api->updateSupportedVersions(); + self->client->failed = true; + MultiVersionApi::api->updateSupportedVersions(); + } } else { - runGrvProbe(dbState); + self->runGrvProbe(dbState); } }, nullptr); @@ -1202,30 +1228,28 @@ void MultiVersionDatabase::LegacyVersionMonitor::startConnectionMonitor( // Must be called from main thread void MultiVersionDatabase::LegacyVersionMonitor::runGrvProbe(Reference dbState) { tr = db->createTransaction(); - versionMonitor = mapThreadFuture(tr->getReadVersion(), [this, dbState](ErrorOr v) { - onMainThreadVoid( - [this, v, dbState]() { - monitorRunning = false; - - // If the version attempt returns an error, we regard that as a connection (except - // operation_cancelled) - if (v.isError() && v.getError().code() == error_code_operation_cancelled) { - TraceEvent(SevError, "FailedToOpenDatabaseOnClient") - .error(v.getError()) - .detail("LibPath", client->libPath); - - client->failed = true; - MultiVersionApi::api->updateSupportedVersions(); - } else { - dbState->protocolVersionChanged(client->protocolVersion); - } - }, - nullptr); + Reference self = Reference::addRef(this); + 
versionMonitor = mapThreadFuture(tr->getReadVersion(), [self, dbState](ErrorOr v) { + // If the version attempt returns an error, we regard that as a connection (except operation_cancelled) + if (!v.isError() || v.getError().code() != error_code_operation_cancelled) { + onMainThreadVoid( + [self, dbState]() { + self->monitorRunning = false; + dbState->protocolVersionChanged(self->client->protocolVersion); + }, + nullptr); + } return v.map([](Version v) { return Void(); }); }); } +void MultiVersionDatabase::LegacyVersionMonitor::close() { + if (versionMonitor.isValid()) { + versionMonitor.cancel(); + } +} + std::atomic_flag MultiVersionDatabase::externalClientsInitialized = ATOMIC_FLAG_INIT; // MultiVersionApi diff --git a/fdbclient/MultiVersionTransaction.h b/fdbclient/MultiVersionTransaction.h index 2244ec2c6e..a98e16b440 100644 --- a/fdbclient/MultiVersionTransaction.h +++ b/fdbclient/MultiVersionTransaction.h @@ -200,24 +200,24 @@ public: ThreadFuture> get(const KeyRef& key, bool snapshot = false) override; ThreadFuture getKey(const KeySelectorRef& key, bool snapshot = false) override; - ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeyRangeRef& keys, - int limit, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeyRangeRef& keys, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) override; + ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) 
override; + ThreadFuture getRange(const KeyRangeRef& keys, + int limit, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeyRangeRef& keys, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) override; ThreadFuture>> getAddressesForKey(const KeyRef& key) override; ThreadFuture> getVersionstamp() override; ThreadFuture getEstimatedRangeSizeBytes(const KeyRangeRef& keys) override; @@ -339,24 +339,24 @@ public: ThreadFuture> get(const KeyRef& key, bool snapshot = false) override; ThreadFuture getKey(const KeySelectorRef& key, bool snapshot = false) override; - ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeyRangeRef& keys, - int limit, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeyRangeRef& keys, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) override; + ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeyRangeRef& keys, + int limit, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeyRangeRef& keys, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) override; ThreadFuture>> getAddressesForKey(const KeyRef& key) override; ThreadFuture> getVersionstamp() override; @@ -492,7 +492,6 @@ public: void startLegacyVersionMonitors(); // Cleans up state for the legacy version monitors to 
break reference cycles - // Must be called from the main thread void close(); Reference db; @@ -518,7 +517,7 @@ public: // Versions 5.0 and older do not support connection packet monitoring and require alternate techniques to // determine the cluster version. - std::list legacyVersionMonitors; + std::list> legacyVersionMonitors; Optional dbProtocolVersion; @@ -533,9 +532,11 @@ public: // A struct that enables monitoring whether the cluster is running an old version (<= 5.0) that doesn't support // connect packet monitoring. - struct LegacyVersionMonitor { + struct LegacyVersionMonitor : ThreadSafeReferenceCounted { LegacyVersionMonitor(Reference const& client) : client(client), monitorRunning(false) {} - ~LegacyVersionMonitor() { TraceEvent("DestroyingVersionMonitor"); } + + // Terminates the version monitor to break reference cycles + void close(); // Starts the connection monitor by creating a database object at an old version. // Must be called from the main thread diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index c95ed7ba29..d7b854806f 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -386,7 +386,7 @@ ACTOR static Future delExcessClntTxnEntriesActor(Transaction* tr, int64_t ? 
(txInfoSize - clientTxInfoSizeLimit) : CLIENT_KNOBS->TRANSACTION_SIZE_LIMIT; GetRangeLimits limit(GetRangeLimits::ROW_LIMIT_UNLIMITED, getRangeByteLimit); - Standalone txEntries = + RangeResult txEntries = wait(tr->getRange(KeyRangeRef(clientLatencyName, strinc(clientLatencyName)), limit)); state int64_t numBytesToDel = 0; KeyRef endKey; @@ -596,7 +596,7 @@ ACTOR Future updateCachedRanges(DatabaseContext* self, std::map range = wait(tr.getRange(storageCacheKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult range = wait(tr.getRange(storageCacheKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!range.more); std::vector>> cacheInterfaces; cacheInterfaces.reserve(cacheServers->size()); @@ -673,8 +673,7 @@ ACTOR Future monitorCacheList(DatabaseContext* self) { // the cyclic reference to self. wait(refreshTransaction(self, &tr)); try { - Standalone cacheList = - wait(tr.getRange(storageCacheServerKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult cacheList = wait(tr.getRange(storageCacheServerKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!cacheList.more); bool hasChanges = false; std::map allCacheServers; @@ -757,16 +756,16 @@ void DatabaseContext::registerSpecialKeySpaceModule(SpecialKeySpace::MODULE modu specialKeySpaceModules.push_back(std::move(impl)); } -ACTOR Future> getWorkerInterfaces(Reference clusterFile); +ACTOR Future getWorkerInterfaces(Reference clusterFile); ACTOR Future> getJSON(Database db); struct WorkerInterfacesSpecialKeyImpl : SpecialKeyRangeReadImpl { - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override { + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override { if (ryw->getDatabase().getPtr() && ryw->getDatabase()->getConnectionFile()) { Key prefix = Key(getKeyRange().begin); return map(getWorkerInterfaces(ryw->getDatabase()->getConnectionFile()), - [prefix = prefix, kr = KeyRange(kr)](const Standalone& in) { - Standalone result; + [prefix = prefix, kr = KeyRange(kr)](const RangeResult& in) { + RangeResult result; for 
(const auto& [k_, v] : in) { auto k = k_.withPrefix(prefix); if (kr.contains(k)) @@ -777,7 +776,7 @@ struct WorkerInterfacesSpecialKeyImpl : SpecialKeyRangeReadImpl { return result; }); } else { - return Standalone(); + return RangeResult(); } } @@ -785,10 +784,10 @@ struct WorkerInterfacesSpecialKeyImpl : SpecialKeyRangeReadImpl { }; struct SingleSpecialKeyImpl : SpecialKeyRangeReadImpl { - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override { + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override { ASSERT(kr.contains(k)); return map(f(ryw), [k = k](Optional v) { - Standalone result; + RangeResult result; if (v.present()) { result.push_back_deep(result.arena(), KeyValueRef(k, v.get())); } @@ -807,11 +806,11 @@ private: class HealthMetricsRangeImpl : public SpecialKeyRangeAsyncImpl { public: explicit HealthMetricsRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; -static Standalone healthMetricsToKVPairs(const HealthMetrics& metrics, KeyRangeRef kr) { - Standalone result; +static RangeResult healthMetricsToKVPairs(const HealthMetrics& metrics, KeyRangeRef kr) { + RangeResult result; if (CLIENT_BUGGIFY) return result; if (kr.contains(LiteralStringRef("\xff\xff/metrics/health/aggregate")) && metrics.worstStorageDurabilityLag != 0) { @@ -881,8 +880,7 @@ static Standalone healthMetricsToKVPairs(const HealthMetrics& me return result; } -ACTOR static Future> healthMetricsGetRangeActor(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) { +ACTOR static Future healthMetricsGetRangeActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { HealthMetrics metrics = wait(ryw->getDatabase()->getHealthMetrics( /*detailed ("per process")*/ kr.intersects(KeyRangeRef(LiteralStringRef("\xff\xff/metrics/health/storage/"), LiteralStringRef("\xff\xff/metrics/health/storage0"))) || @@ -893,8 
+891,7 @@ ACTOR static Future> healthMetricsGetRangeActor(ReadY HealthMetricsRangeImpl::HealthMetricsRangeImpl(KeyRangeRef kr) : SpecialKeyRangeAsyncImpl(kr) {} -Future> HealthMetricsRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future HealthMetricsRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return healthMetricsGetRangeActor(ryw, kr); } @@ -1967,14 +1964,14 @@ AddressExclusion AddressExclusion::parse(StringRef const& key) { } } -Future> getRange(Database const& cx, - Future const& fVersion, - KeySelector const& begin, - KeySelector const& end, - GetRangeLimits const& limits, - bool const& reverse, - TransactionInfo const& info, - TagSet const& tags); +Future getRange(Database const& cx, + Future const& fVersion, + KeySelector const& begin, + KeySelector const& end, + GetRangeLimits const& limits, + bool const& reverse, + TransactionInfo const& info, + TagSet const& tags); ACTOR Future> getValue(Future version, Key key, @@ -2698,14 +2695,14 @@ void transformRangeLimits(GetRangeLimits limits, bool reverse, GetKeyValuesReque } } -ACTOR Future> getExactRange(Database cx, - Version version, - KeyRange keys, - GetRangeLimits limits, - bool reverse, - TransactionInfo info, - TagSet tags) { - state Standalone output; +ACTOR Future getExactRange(Database cx, + Version version, + KeyRange keys, + GetRangeLimits limits, + bool reverse, + TransactionInfo info, + TagSet tags) { + state RangeResult output; state Span span("NAPI:getExactRange"_loc, info.spanID); // printf("getExactRange( '%s', '%s' )\n", keys.begin.toString().c_str(), keys.end.toString().c_str()); @@ -2877,14 +2874,14 @@ Future resolveKey(Database const& cx, return getKey(cx, key, version, info, tags); } -ACTOR Future> getRangeFallback(Database cx, - Version version, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse, - TransactionInfo info, - TagSet tags) { +ACTOR Future getRangeFallback(Database cx, + Version version, 
+ KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse, + TransactionInfo info, + TagSet tags) { if (version == latestVersion) { state Transaction transaction(cx); transaction.setOption(FDBTransactionOptions::CAUSAL_READ_RISKY); @@ -2900,15 +2897,15 @@ ACTOR Future> getRangeFallback(Database cx, state Key b = wait(fb); state Key e = wait(fe); if (b >= e) { - return Standalone(); + return RangeResult(); } // if e is allKeys.end, we have read through the end of the database // if b is allKeys.begin, we have either read through the beginning of the database, // or allKeys.begin exists in the database and will be part of the conflict range anyways - Standalone _r = wait(getExactRange(cx, version, KeyRangeRef(b, e), limits, reverse, info, tags)); - Standalone r = _r; + RangeResult _r = wait(getExactRange(cx, version, KeyRangeRef(b, e), limits, reverse, info, tags)); + RangeResult r = _r; if (b == allKeys.begin && ((reverse && !r.more) || !reverse)) r.readToBegin = true; @@ -2940,7 +2937,7 @@ void getRangeFinished(Database cx, bool snapshot, Promise> conflictRange, bool reverse, - Standalone result) { + RangeResult result) { int64_t bytes = 0; for (const KeyValueRef& kv : result) { bytes += kv.key.size() + kv.value.size(); @@ -2986,21 +2983,21 @@ void getRangeFinished(Database cx, } } -ACTOR Future> getRange(Database cx, - Reference trLogInfo, - Future fVersion, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - Promise> conflictRange, - bool snapshot, - bool reverse, - TransactionInfo info, - TagSet tags) { +ACTOR Future getRange(Database cx, + Reference trLogInfo, + Future fVersion, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + Promise> conflictRange, + bool snapshot, + bool reverse, + TransactionInfo info, + TagSet tags) { state GetRangeLimits originalLimits(limits); state KeySelector originalBegin = begin; state KeySelector originalEnd = end; - state Standalone output; + state RangeResult output; state 
Span span("NAPI:getRange"_loc, info.spanID); try { @@ -3132,8 +3129,8 @@ ACTOR Future> getRange(Database cx, bool readToBegin = output.readToBegin; bool readThroughEnd = output.readThroughEnd; - output = Standalone( - RangeResultRef(rep.data, modifiedSelectors || limits.isReached() || rep.more), rep.arena); + output = RangeResult(RangeResultRef(rep.data, modifiedSelectors || limits.isReached() || rep.more), + rep.arena); output.readToBegin = readToBegin; output.readThroughEnd = readThroughEnd; @@ -3186,7 +3183,7 @@ ACTOR Future> getRange(Database cx, TEST(true); // !GetKeyValuesReply.more and modifiedSelectors in getRange if (!rep.data.size()) { - Standalone result = wait(getRangeFallback( + RangeResult result = wait(getRangeFallback( cx, version, originalBegin, originalEnd, originalLimits, reverse, info, tags)); getRangeFinished(cx, trLogInfo, @@ -3223,7 +3220,7 @@ ACTOR Future> getRange(Database cx, reverse ? (end - 1).isBackward() : begin.isBackward()); if (e.code() == error_code_wrong_shard_server) { - Standalone result = wait(getRangeFallback( + RangeResult result = wait(getRangeFallback( cx, version, originalBegin, originalEnd, originalLimits, reverse, info, tags)); getRangeFinished(cx, trLogInfo, @@ -3259,14 +3256,14 @@ ACTOR Future> getRange(Database cx, } } -Future> getRange(Database const& cx, - Future const& fVersion, - KeySelector const& begin, - KeySelector const& end, - GetRangeLimits const& limits, - bool const& reverse, - TransactionInfo const& info, - TagSet const& tags) { +Future getRange(Database const& cx, + Future const& fVersion, + KeySelector const& begin, + KeySelector const& end, + GetRangeLimits const& limits, + bool const& reverse, + TransactionInfo const& info, + TagSet const& tags) { return getRange(cx, Reference(), fVersion, @@ -3488,18 +3485,18 @@ ACTOR Future>> getAddressesForKeyActor(Key key // serverInterfaces vector being empty, which will cause us to return an empty addresses list. 
state Key ksKey = keyServersKey(key); - state Standalone serverTagResult = wait(getRange(cx, - ver, - lastLessOrEqual(serverTagKeys.begin), - firstGreaterThan(serverTagKeys.end), - GetRangeLimits(CLIENT_KNOBS->TOO_MANY), - false, - info, - options.readTags)); + state RangeResult serverTagResult = wait(getRange(cx, + ver, + lastLessOrEqual(serverTagKeys.begin), + firstGreaterThan(serverTagKeys.end), + GetRangeLimits(CLIENT_KNOBS->TOO_MANY), + false, + info, + options.readTags)); ASSERT(!serverTagResult.more && serverTagResult.size() < CLIENT_KNOBS->TOO_MANY); - Future> futureServerUids = getRange( + Future futureServerUids = getRange( cx, ver, lastLessOrEqual(ksKey), firstGreaterThan(ksKey), GetRangeLimits(1), false, info, options.readTags); - Standalone serverUids = wait(futureServerUids); + RangeResult serverUids = wait(futureServerUids); ASSERT(serverUids.size()); // every shard needs to have a team @@ -3579,16 +3576,16 @@ Future Transaction::getKey(const KeySelector& key, bool snapshot) { return getKeyAndConflictRange(cx, key, getReadVersion(), conflictRange, info, options.readTags); } -Future> Transaction::getRange(const KeySelector& begin, - const KeySelector& end, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +Future Transaction::getRange(const KeySelector& begin, + const KeySelector& end, + GetRangeLimits limits, + bool snapshot, + bool reverse) { ++cx->transactionLogicalReads; ++cx->transactionGetRangeRequests; if (limits.isReached()) - return Standalone(); + return RangeResult(); if (!limits.isValid()) return range_limits_invalid(); @@ -3609,7 +3606,7 @@ Future> Transaction::getRange(const KeySelector& begi if (b.offset >= e.offset && b.getKey() >= e.getKey()) { TEST(true); // Native range inverted - return Standalone(); + return RangeResult(); } Promise> conflictRange; @@ -3621,11 +3618,11 @@ Future> Transaction::getRange(const KeySelector& begi cx, trLogInfo, getReadVersion(), b, e, limits, conflictRange, snapshot, reverse, info, 
options.readTags); } -Future> Transaction::getRange(const KeySelector& begin, - const KeySelector& end, - int limit, - bool snapshot, - bool reverse) { +Future Transaction::getRange(const KeySelector& begin, + const KeySelector& end, + int limit, + bool snapshot, + bool reverse) { return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse); } @@ -3981,7 +3978,7 @@ ACTOR void checkWrites(Database cx, if (m.mutated) { checkedRanges++; if (m.cleared) { - Standalone shouldBeEmpty = wait(tr.getRange(it->range(), 1)); + RangeResult shouldBeEmpty = wait(tr.getRange(it->range(), 1)); if (shouldBeEmpty.size()) { TraceEvent(SevError, "CheckWritesFailed") .detail("Class", "Clear") @@ -5640,7 +5637,7 @@ ACTOR static Future rebootWorkerActor(DatabaseContext* cx, ValueRef add state std::map> address_interface; if (!cx->getConnectionFile()) return 0; - Standalone kvs = wait(getWorkerInterfaces(cx->getConnectionFile())); + RangeResult kvs = wait(getWorkerInterfaces(cx->getConnectionFile())); ASSERT(!kvs.more); // Note: reuse this knob from fdbcli, change it if necessary Reference connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM)); diff --git a/fdbclient/NativeAPI.actor.h b/fdbclient/NativeAPI.actor.h index 86a8f9313a..fee44d20d1 100644 --- a/fdbclient/NativeAPI.actor.h +++ b/fdbclient/NativeAPI.actor.h @@ -252,30 +252,30 @@ public: [[nodiscard]] Future watch(Reference watch); [[nodiscard]] Future getKey(const KeySelector& key, bool snapshot = false); // Future< Optional > get( const KeySelectorRef& key ); - [[nodiscard]] Future> getRange(const KeySelector& begin, - const KeySelector& end, - int limit, - bool snapshot = false, - bool reverse = false); - [[nodiscard]] Future> getRange(const KeySelector& begin, - const KeySelector& end, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false); - [[nodiscard]] Future> getRange(const KeyRange& keys, - int limit, - bool snapshot = false, - bool reverse = false) { + [[nodiscard]] Future 
getRange(const KeySelector& begin, + const KeySelector& end, + int limit, + bool snapshot = false, + bool reverse = false); + [[nodiscard]] Future getRange(const KeySelector& begin, + const KeySelector& end, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false); + [[nodiscard]] Future getRange(const KeyRange& keys, + int limit, + bool snapshot = false, + bool reverse = false) { return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()), KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limit, snapshot, reverse); } - [[nodiscard]] Future> getRange(const KeyRange& keys, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) { + [[nodiscard]] Future getRange(const KeyRange& keys, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) { return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()), KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limits, diff --git a/fdbclient/RYWIterator.cpp b/fdbclient/RYWIterator.cpp index fd3eec35c7..cfd233e3a2 100644 --- a/fdbclient/RYWIterator.cpp +++ b/fdbclient/RYWIterator.cpp @@ -42,7 +42,7 @@ const RYWIterator::SEGMENT_TYPE RYWIterator::typeMap[12] = { }; RYWIterator::SEGMENT_TYPE RYWIterator::type() const { - if (is_unreadable()) + if (is_unreadable() && !bypassUnreadable) throw accessed_unreadable(); return typeMap[writes.type() * 3 + cache.type()]; @@ -72,7 +72,7 @@ ExtStringRef RYWIterator::endKey() { } const KeyValueRef* RYWIterator::kv(Arena& arena) { - if (is_unreadable()) + if (is_unreadable() && !bypassUnreadable) throw accessed_unreadable(); if (writes.is_unmodified_range()) { @@ -347,8 +347,9 @@ void testSnapshotCache() { } /* -ACTOR Standalone getRange( Transaction* tr, KeySelector begin, KeySelector end, SnapshotCache* cache, -WriteMap* writes, GetRangeLimits limits ) { RYWIterator it(cache, writes); RYWIterator itEnd(cache, writes); +ACTOR RangeResult getRange( Transaction* tr, KeySelector begin, KeySelector 
end, SnapshotCache* cache, +WriteMap* writes, GetRangeLimits limits ) { + RYWIterator it(cache, writes); RYWIterator itEnd(cache, writes); resolveKeySelectorFromCache( begin, it ); resolveKeySelectorFromCache( end, itEnd ); @@ -362,9 +363,8 @@ WriteMap* writes, GetRangeLimits limits ) { RYWIterator it(cache, writes); RYWIt ucEnd.skipUncached(itEnd); state KeySelector read_end = ucEnd==itEnd ? end : -firstGreaterOrEqual(ucEnd.endKey().toStandaloneStringRef()); Standalone snapshot_read = wait( -tr->getRange( begin, read_end, limits, false, false ) ); cache->insert( getKnownKeyRange( snapshot_read, begin, read_end -), snapshot_read ); +firstGreaterOrEqual(ucEnd.endKey().toStandaloneStringRef()); RangeResult snapshot_read = wait(tr->getRange( begin, +read_end, limits, false, false ) ); cache->insert( getKnownKeyRange( snapshot_read, begin, read_end), snapshot_read ); // TODO: Is there a more efficient way to deal with invalidation? it = itEnd = RYWIterator( cache, writes ); diff --git a/fdbclient/RYWIterator.h b/fdbclient/RYWIterator.h index e28b11c033..8bc9091fe2 100644 --- a/fdbclient/RYWIterator.h +++ b/fdbclient/RYWIterator.h @@ -28,7 +28,7 @@ class RYWIterator { public: RYWIterator(SnapshotCache* snapshotCache, WriteMap* writeMap) - : cache(snapshotCache), writes(writeMap), begin_key_cmp(0), end_key_cmp(0) {} + : cache(snapshotCache), writes(writeMap), begin_key_cmp(0), end_key_cmp(0), bypassUnreadable(false) {} enum SEGMENT_TYPE { UNKNOWN_RANGE, EMPTY_RANGE, KV }; static const SEGMENT_TYPE typeMap[12]; @@ -59,6 +59,8 @@ public: void skipContiguousBack(KeyRef key); + void bypassUnreadableProtection() { bypassUnreadable = true; } + WriteMap::iterator& extractWriteMapIterator(); // Really this should return an iterator by value, but for performance it's convenient to actually grab the internal // one. Consider copying the return value if performance isn't critical. 
If you modify the returned iterator, it @@ -72,6 +74,8 @@ private: SnapshotCache::iterator cache; WriteMap::iterator writes; KeyValueRef temp; + bool bypassUnreadable; // When set, allows read from sections of keyspace that have become unreadable because of + // versionstamp operations void updateCmp(); }; diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp index a6e047b979..6c7b77ff65 100644 --- a/fdbclient/ReadYourWrites.actor.cpp +++ b/fdbclient/ReadYourWrites.actor.cpp @@ -71,7 +71,7 @@ public: : begin(begin), end(end), limits(limits) {} KeySelector begin, end; GetRangeLimits limits; - typedef Standalone Result; + using Result = RangeResult; }; // read() Performs a read (get, getKey, getRange, etc), in the context of the given transaction. Snapshot or RYW @@ -84,6 +84,9 @@ public: static Future> read(ReadYourWritesTransaction* ryw, GetValueReq read, Iter* it) { // This overload is required to provide postcondition: it->extractWriteMapIterator().segmentContains(read.key) + if (ryw->options.bypassUnreadable) { + it->bypassUnreadableProtection(); + } it->skip(read.key); state bool dependent = it->is_dependent(); if (it->is_kv()) { @@ -126,7 +129,7 @@ public: ACTOR template static Future read(ReadYourWritesTransaction* ryw, GetKeyReq read, Iter* it) { if (read.key.offset > 0) { - Standalone result = + RangeResult result = wait(getRangeValue(ryw, read.key, firstGreaterOrEqual(ryw->getMaxReadKey()), GetRangeLimits(1), it)); if (result.readToBegin) return allKeys.begin; @@ -135,7 +138,7 @@ public: return result[0].key; } else { read.key.offset++; - Standalone result = + RangeResult result = wait(getRangeValueBack(ryw, firstGreaterOrEqual(allKeys.begin), read.key, GetRangeLimits(1), it)); if (result.readThroughEnd) return ryw->getMaxReadKey(); @@ -146,12 +149,12 @@ public: }; template - static Future> read(ReadYourWritesTransaction* ryw, GetRangeReq read, Iter* it) { + static Future read(ReadYourWritesTransaction* ryw, GetRangeReq read, 
Iter* it) { return getRangeValue(ryw, read.begin, read.end, read.limits, it); }; template - static Future> read(ReadYourWritesTransaction* ryw, GetRangeReq read, Iter* it) { + static Future read(ReadYourWritesTransaction* ryw, GetRangeReq read, Iter* it) { return getRangeValueBack(ryw, read.begin, read.end, read.limits, it); }; @@ -171,9 +174,7 @@ public: } ACTOR template - static Future> readThrough(ReadYourWritesTransaction* ryw, - GetRangeReq read, - bool snapshot) { + static Future readThrough(ReadYourWritesTransaction* ryw, GetRangeReq read, bool snapshot) { if (Reverse && read.end.offset > 1) { // FIXME: Optimistically assume that this will not run into the system keys, and only reissue if the result // actually does. @@ -184,16 +185,15 @@ public: read.end = KeySelector(firstGreaterOrEqual(key), key.arena()); } - Standalone v = wait(ryw->tr.getRange(read.begin, read.end, read.limits, snapshot, Reverse)); + RangeResult v = wait(ryw->tr.getRange(read.begin, read.end, read.limits, snapshot, Reverse)); KeyRef maxKey = ryw->getMaxReadKey(); if (v.size() > 0) { if (!Reverse && v[v.size() - 1].key >= maxKey) { - state Standalone _v = v; + state RangeResult _v = v; int i = _v.size() - 2; for (; i >= 0 && _v[i].key >= maxKey; --i) { } - return Standalone(RangeResultRef(VectorRef(&_v[0], i + 1), false), - _v.arena()); + return RangeResult(RangeResultRef(VectorRef(&_v[0], i + 1), false), _v.arena()); } } @@ -230,7 +230,7 @@ public: static void addConflictRange(ReadYourWritesTransaction* ryw, GetRangeReq read, WriteMap::iterator& it, - Standalone const& result) { + RangeResult const& result) { KeyRef rangeBegin, rangeEnd; bool endInArena = false; @@ -265,7 +265,7 @@ public: static void addConflictRange(ReadYourWritesTransaction* ryw, GetRangeReq read, WriteMap::iterator& it, - Standalone const& result) { + RangeResult const& result) { KeyRef rangeBegin, rangeEnd; bool endInArena = false; @@ -527,14 +527,14 @@ public: // TODO: read to begin, read through end flags for 
result ACTOR template - static Future> getRangeValue(ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - Iter* pit) { + static Future getRangeValue(ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + Iter* pit) { state Iter& it(*pit); state Iter itEnd(*pit); - state Standalone result; + state RangeResult result; state int64_t additionalRows = 0; state int itemsPastEnd = 0; state int requestCount = 0; @@ -690,8 +690,7 @@ public: //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows); additionalRows = 0; - Standalone snapshot_read = - wait(ryw->tr.getRange(read_begin, read_end, requestLimit, true, false)); + RangeResult snapshot_read = wait(ryw->tr.getRange(read_begin, read_end, requestLimit, true, false)); KeyRangeRef range = getKnownKeyRange(snapshot_read, read_begin, read_end, ryw->arena); //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough); @@ -829,14 +828,14 @@ public: } ACTOR template - static Future> getRangeValueBack(ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - Iter* pit) { + static Future getRangeValueBack(ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + Iter* pit) { state Iter& it(*pit); state Iter 
itEnd(*pit); - state Standalone result; + state RangeResult result; state int64_t additionalRows = 0; state int itemsPastBegin = 0; state int requestCount = 0; @@ -994,8 +993,7 @@ public: //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows); additionalRows = 0; - Standalone snapshot_read = - wait(ryw->tr.getRange(read_begin, read_end, requestLimit, true, true)); + RangeResult snapshot_read = wait(ryw->tr.getRange(read_begin, read_end, requestLimit, true, true)); KeyRangeRef range = getKnownKeyRangeBack(snapshot_read, read_begin, read_end, ryw->arena); //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough); @@ -1329,7 +1327,7 @@ ACTOR Future> getJSON(Database db) { return getValueFromJSON(statusObj); } -ACTOR Future> getWorkerInterfaces(Reference clusterFile) { +ACTOR Future getWorkerInterfaces(Reference clusterFile) { state Reference>> clusterInterface(new AsyncVar>); state Future leaderMon = monitorLeader(clusterFile, clusterInterface); @@ -1340,7 +1338,7 @@ ACTOR Future> getWorkerInterfaces(Referenceget().get().getClientWorkers.getReply(GetClientWorkersRequest())) : Never())) { - Standalone result; + RangeResult result; for (auto& it : workers) { result.push_back_deep( result.arena(), @@ -1434,11 +1432,11 @@ Future ReadYourWritesTransaction::getKey(const KeySelector& key, bool snaps return result; } -Future> 
ReadYourWritesTransaction::getRange(KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +Future ReadYourWritesTransaction::getRange(KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool snapshot, + bool reverse) { if (getDatabase()->apiVersionAtLeast(630)) { if (specialKeys.contains(begin.getKey()) && specialKeys.begin <= end.getKey() && end.getKey() <= specialKeys.end) { @@ -1450,7 +1448,7 @@ Future> ReadYourWritesTransaction::getRange(KeySelect if (tr.getDatabase().getPtr() && tr.getDatabase()->getConnectionFile()) { return getWorkerInterfaces(tr.getDatabase()->getConnectionFile()); } else { - return Standalone(); + return RangeResult(); } } } @@ -1469,7 +1467,7 @@ Future> ReadYourWritesTransaction::getRange(KeySelect // This optimization prevents nullptr operations from being added to the conflict range if (limits.isReached()) { TEST(true); // RYW range read limit 0 - return Standalone(); + return RangeResult(); } if (!limits.isValid()) @@ -1483,10 +1481,10 @@ Future> ReadYourWritesTransaction::getRange(KeySelect if (begin.offset >= end.offset && begin.getKey() >= end.getKey()) { TEST(true); // RYW range inverted - return Standalone(); + return RangeResult(); } - Future> result = + Future result = reverse ? 
RYWImpl::readWithConflictRange(this, RYWImpl::GetRangeReq(begin, end, limits), snapshot) : RYWImpl::readWithConflictRange(this, RYWImpl::GetRangeReq(begin, end, limits), snapshot); @@ -1494,11 +1492,11 @@ Future> ReadYourWritesTransaction::getRange(KeySelect return result; } -Future> ReadYourWritesTransaction::getRange(const KeySelector& begin, - const KeySelector& end, - int limit, - bool snapshot, - bool reverse) { +Future ReadYourWritesTransaction::getRange(const KeySelector& begin, + const KeySelector& end, + int limit, + bool snapshot, + bool reverse) { return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse); } @@ -1739,11 +1737,11 @@ void ReadYourWritesTransaction::setToken(uint64_t token) { tr.setToken(token); } -Standalone ReadYourWritesTransaction::getReadConflictRangeIntersecting(KeyRangeRef kr) { +RangeResult ReadYourWritesTransaction::getReadConflictRangeIntersecting(KeyRangeRef kr) { TEST(true); // Special keys read conflict range ASSERT(readConflictRangeKeysRange.contains(kr)); ASSERT(!tr.options.checkWritesEnabled); - Standalone result; + RangeResult result; if (!options.readYourWritesDisabled) { kr = kr.removePrefix(readConflictRangeKeysRange.begin); auto iter = readConflicts.rangeContainingKeyBefore(kr.begin); @@ -1781,10 +1779,10 @@ Standalone ReadYourWritesTransaction::getReadConflictRangeInters return result; } -Standalone ReadYourWritesTransaction::getWriteConflictRangeIntersecting(KeyRangeRef kr) { +RangeResult ReadYourWritesTransaction::getWriteConflictRangeIntersecting(KeyRangeRef kr) { TEST(true); // Special keys write conflict range ASSERT(writeConflictRangeKeysRange.contains(kr)); - Standalone result; + RangeResult result; // Memory owned by result CoalescedKeyRefRangeMap writeConflicts{ LiteralStringRef("0"), specialKeys.end }; @@ -2237,6 +2235,10 @@ void ReadYourWritesTransaction::setOptionImpl(FDBTransactionOptions::Option opti validateOptionValue(value, false); options.specialKeySpaceChangeConfiguration = true; break; + 
case FDBTransactionOptions::BYPASS_UNREADABLE: + validateOptionValue(value, false); + options.bypassUnreadable = true; + break; default: break; } diff --git a/fdbclient/ReadYourWrites.h b/fdbclient/ReadYourWrites.h index f8e8e390bd..977dab1f24 100644 --- a/fdbclient/ReadYourWrites.h +++ b/fdbclient/ReadYourWrites.h @@ -42,6 +42,7 @@ struct ReadYourWritesTransactionOptions { double timeoutInSeconds; int maxRetries; int snapshotRywEnabled; + bool bypassUnreadable : 1; ReadYourWritesTransactionOptions() {} explicit ReadYourWritesTransactionOptions(Transaction const& tr); @@ -78,30 +79,27 @@ public: Optional getCachedReadVersion() { return tr.getCachedReadVersion(); } Future> get(const Key& key, bool snapshot = false); Future getKey(const KeySelector& key, bool snapshot = false); - Future> getRange(const KeySelector& begin, - const KeySelector& end, - int limit, - bool snapshot = false, - bool reverse = false); - Future> getRange(KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false); - Future> getRange(const KeyRange& keys, - int limit, - bool snapshot = false, - bool reverse = false) { + Future getRange(const KeySelector& begin, + const KeySelector& end, + int limit, + bool snapshot = false, + bool reverse = false); + Future getRange(KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false); + Future getRange(const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false) { return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()), KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limit, snapshot, reverse); } - Future> getRange(const KeyRange& keys, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) { + Future getRange(const KeyRange& keys, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) { return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()), 
KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limits, @@ -169,9 +167,9 @@ public: void setToken(uint64_t token); // Read from the special key space readConflictRangeKeysRange - Standalone getReadConflictRangeIntersecting(KeyRangeRef kr); + RangeResult getReadConflictRangeIntersecting(KeyRangeRef kr); // Read from the special key space writeConflictRangeKeysRange - Standalone getWriteConflictRangeIntersecting(KeyRangeRef kr); + RangeResult getWriteConflictRangeIntersecting(KeyRangeRef kr); bool specialKeySpaceRelaxed() const { return options.specialKeySpaceRelaxed; } bool specialKeySpaceChangeConfiguration() const { return options.specialKeySpaceChangeConfiguration; } diff --git a/fdbclient/Schemas.cpp b/fdbclient/Schemas.cpp index 2f6843392a..5fef5fb6eb 100644 --- a/fdbclient/Schemas.cpp +++ b/fdbclient/Schemas.cpp @@ -144,6 +144,16 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "counter":0, "roughness":0.0 }, + "fetched_versions":{ + "hz":0.0, + "counter":0, + "roughness":0.0 + }, + "fetches_from_logs":{ + "hz":0.0, + "counter":0, + "roughness":0.0 + }, "grv_latency_statistics":{ "default":{ "count":0, @@ -194,6 +204,18 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "p99":0.0, "p99.9":0.0 }, + "commit_batching_window_size":{ + "count":0, + "min":0.0, + "max":0.0, + "median":0.0, + "mean":0.0, + "p25":0.0, + "p90":0.0, + "p95":0.0, + "p99":0.0, + "p99.9":0.0 + }, "grv_latency_bands":{ "$map": 1 }, @@ -636,6 +658,10 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "data_distribution_disabled_for_rebalance":true, "data_distribution_disabled":true, "active_primary_dc":"pv", + "bounce_impact":{ + "can_clean_bounce":true, + "reason":"" + }, "configuration":{ "log_anti_quorum":0, "log_replicas":2, diff --git a/fdbclient/SnapshotCache.h b/fdbclient/SnapshotCache.h index df389eb4ab..eabd289aee 100644 --- a/fdbclient/SnapshotCache.h +++ b/fdbclient/SnapshotCache.h @@ -203,6 
+203,7 @@ public: bool is_empty_range() const { return type() == EMPTY_RANGE; } bool is_dependent() const { return false; } bool is_unreadable() const { return false; } + void bypassUnreadableProtection() {} ExtStringRef beginKey() const { if (offset == 0) { diff --git a/fdbclient/SpecialKeySpace.actor.cpp b/fdbclient/SpecialKeySpace.actor.cpp index 543b089753..6b147eaa07 100644 --- a/fdbclient/SpecialKeySpace.actor.cpp +++ b/fdbclient/SpecialKeySpace.actor.cpp @@ -102,9 +102,7 @@ std::set SpecialKeySpace::options = { "excluded/force", "failed/for std::set SpecialKeySpace::tracingOptions = { kTracingTransactionIdKey, kTracingTokenKey }; -Standalone rywGetRange(ReadYourWritesTransaction* ryw, - const KeyRangeRef& kr, - const Standalone& res); +RangeResult rywGetRange(ReadYourWritesTransaction* ryw, const KeyRangeRef& kr, const RangeResult& res); // This function will move the given KeySelector as far as possible to the standard form: // orEqual == false && offset == 1 (Standard form) @@ -115,13 +113,13 @@ Standalone rywGetRange(ReadYourWritesTransaction* ryw, ACTOR Future moveKeySelectorOverRangeActor(const SpecialKeyRangeReadImpl* skrImpl, ReadYourWritesTransaction* ryw, KeySelector* ks, - Optional>* cache) { + Optional* cache) { ASSERT(!ks->orEqual); // should be removed before calling ASSERT(ks->offset != 1); // never being called if KeySelector is already normalized state Key startKey(skrImpl->getKeyRange().begin); state Key endKey(skrImpl->getKeyRange().end); - state Standalone result; + state RangeResult result; if (ks->offset < 1) { // less than the given key @@ -142,10 +140,10 @@ ACTOR Future moveKeySelectorOverRangeActor(const SpecialKeyRangeReadImpl* if (skrImpl->isAsync()) { const SpecialKeyRangeAsyncImpl* ptr = dynamic_cast(skrImpl); - Standalone result_ = wait(ptr->getRange(ryw, KeyRangeRef(startKey, endKey), cache)); + RangeResult result_ = wait(ptr->getRange(ryw, KeyRangeRef(startKey, endKey), cache)); result = result_; } else { - Standalone result_ = 
wait(skrImpl->getRange(ryw, KeyRangeRef(startKey, endKey))); + RangeResult result_ = wait(skrImpl->getRange(ryw, KeyRangeRef(startKey, endKey))); result = result_; } @@ -194,8 +192,8 @@ ACTOR Future normalizeKeySelectorActor(SpecialKeySpace* sks, KeySelector* ks, KeyRangeRef boundary, int* actualOffset, - Standalone* result, - Optional>* cache) { + RangeResult* result, + Optional* cache) { // If offset < 1, where we need to move left, iter points to the range containing at least one smaller key // (It's a wasting of time to walk through the range whose begin key is same as ks->key) // (rangeContainingKeyBefore itself handles the case where ks->key == Key()) @@ -265,15 +263,15 @@ void SpecialKeySpace::modulesBoundaryInit() { } } -ACTOR Future> SpecialKeySpace::checkRYWValid(SpecialKeySpace* sks, - ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse) { +ACTOR Future SpecialKeySpace::checkRYWValid(SpecialKeySpace* sks, + ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse) { ASSERT(ryw); choose { - when(Standalone result = + when(RangeResult result = wait(SpecialKeySpace::getRangeAggregationActor(sks, ryw, begin, end, limits, reverse))) { return result; } @@ -281,22 +279,22 @@ ACTOR Future> SpecialKeySpace::checkRYWValid(SpecialK } } -ACTOR Future> SpecialKeySpace::getRangeAggregationActor(SpecialKeySpace* sks, - ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse) { +ACTOR Future SpecialKeySpace::getRangeAggregationActor(SpecialKeySpace* sks, + ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse) { // This function handles ranges which cover more than one keyrange and aggregates all results // KeySelector, GetRangeLimits and reverse are all handled here - state Standalone result; - state Standalone pairs; + state RangeResult 
result; + state RangeResult pairs; state RangeMap::iterator iter; state int actualBeginOffset; state int actualEndOffset; state KeyRangeRef moduleBoundary; // used to cache result from potential first read - state Optional> cache; + state Optional cache; if (ryw->specialKeySpaceRelaxed()) { moduleBoundary = sks->range; @@ -345,10 +343,10 @@ ACTOR Future> SpecialKeySpace::getRangeAggregationAct KeyRef keyEnd = kr.contains(end.getKey()) ? end.getKey() : kr.end; if (iter->value()->isAsync() && cache.present()) { const SpecialKeyRangeAsyncImpl* ptr = dynamic_cast(iter->value()); - Standalone pairs_ = wait(ptr->getRange(ryw, KeyRangeRef(keyStart, keyEnd), &cache)); + RangeResult pairs_ = wait(ptr->getRange(ryw, KeyRangeRef(keyStart, keyEnd), &cache)); pairs = pairs_; } else { - Standalone pairs_ = wait(iter->value()->getRange(ryw, KeyRangeRef(keyStart, keyEnd))); + RangeResult pairs_ = wait(iter->value()->getRange(ryw, KeyRangeRef(keyStart, keyEnd))); pairs = pairs_; } result.arena().dependsOn(pairs.arena()); @@ -376,10 +374,10 @@ ACTOR Future> SpecialKeySpace::getRangeAggregationAct KeyRef keyEnd = kr.contains(end.getKey()) ? 
end.getKey() : kr.end; if (iter->value()->isAsync() && cache.present()) { const SpecialKeyRangeAsyncImpl* ptr = dynamic_cast(iter->value()); - Standalone pairs_ = wait(ptr->getRange(ryw, KeyRangeRef(keyStart, keyEnd), &cache)); + RangeResult pairs_ = wait(ptr->getRange(ryw, KeyRangeRef(keyStart, keyEnd), &cache)); pairs = pairs_; } else { - Standalone pairs_ = wait(iter->value()->getRange(ryw, KeyRangeRef(keyStart, keyEnd))); + RangeResult pairs_ = wait(iter->value()->getRange(ryw, KeyRangeRef(keyStart, keyEnd))); pairs = pairs_; } result.arena().dependsOn(pairs.arena()); @@ -402,17 +400,17 @@ ACTOR Future> SpecialKeySpace::getRangeAggregationAct return result; } -Future> SpecialKeySpace::getRange(ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse) { +Future SpecialKeySpace::getRange(ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse) { // validate limits here if (!limits.isValid()) return range_limits_invalid(); if (limits.isReached()) { TEST(true); // read limit 0 - return Standalone(); + return RangeResult(); } // make sure orEqual == false begin.removeOrEqual(begin.arena()); @@ -420,7 +418,7 @@ Future> SpecialKeySpace::getRange(ReadYourWritesTrans if (begin.offset >= end.offset && begin.getKey() >= end.getKey()) { TEST(true); // range inverted - return Standalone(); + return RangeResult(); } return checkRYWValid(this, ryw, begin, end, limits, reverse); @@ -430,11 +428,11 @@ ACTOR Future> SpecialKeySpace::getActor(SpecialKeySpace* sks, ReadYourWritesTransaction* ryw, KeyRef key) { // use getRange to workaround this - Standalone result = wait(sks->getRange(ryw, - KeySelector(firstGreaterOrEqual(key)), - KeySelector(firstGreaterOrEqual(keyAfter(key))), - GetRangeLimits(CLIENT_KNOBS->TOO_MANY), - false)); + RangeResult result = wait(sks->getRange(ryw, + KeySelector(firstGreaterOrEqual(key)), + KeySelector(firstGreaterOrEqual(keyAfter(key))), + 
GetRangeLimits(CLIENT_KNOBS->TOO_MANY), + false)); ASSERT(result.size() <= 1); if (result.size()) { return Optional(result[0].value); @@ -552,16 +550,20 @@ ACTOR Future commitActor(SpecialKeySpace* sks, ReadYourWritesTransaction* state RangeMap>, KeyRangeRef>::Ranges ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(specialKeys); state RangeMap>, KeyRangeRef>::iterator iter = ranges.begin(); - state std::set writeModulePtrs; + state std::vector writeModulePtrs; + std::unordered_set deduplicate; while (iter != ranges.end()) { std::pair> entry = iter->value(); if (entry.first) { auto modulePtr = sks->getRWImpls().rangeContaining(iter->begin())->value(); - writeModulePtrs.insert(modulePtr); + auto [_, inserted] = deduplicate.insert(modulePtr); + if (inserted) { + writeModulePtrs.push_back(modulePtr); + } } ++iter; } - state std::set::const_iterator it; + state std::vector::const_iterator it; for (it = writeModulePtrs.begin(); it != writeModulePtrs.end(); ++it) { Optional msg = wait((*it)->commit(ryw)); if (msg.present()) { @@ -581,7 +583,7 @@ Future SpecialKeySpace::commit(ReadYourWritesTransaction* ryw) { SKSCTestImpl::SKSCTestImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> SKSCTestImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { +Future SKSCTestImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { ASSERT(range.contains(kr)); auto resultFuture = ryw->getRange(kr, CLIENT_KNOBS->TOO_MANY); // all keys are written to RYW, since GRV is set, the read should happen locally @@ -599,27 +601,25 @@ Future> SKSCTestImpl::commit(ReadYourWritesTransaction* ry ReadConflictRangeImpl::ReadConflictRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {} -ACTOR static Future> getReadConflictRangeImpl(ReadYourWritesTransaction* ryw, KeyRange kr) { +ACTOR static Future getReadConflictRangeImpl(ReadYourWritesTransaction* ryw, KeyRange kr) { wait(ryw->pendingReads()); return ryw->getReadConflictRangeIntersecting(kr); } 
-Future> ReadConflictRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future ReadConflictRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return getReadConflictRangeImpl(ryw, kr); } WriteConflictRangeImpl::WriteConflictRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {} -Future> WriteConflictRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future WriteConflictRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return ryw->getWriteConflictRangeIntersecting(kr); } ConflictingKeysImpl::ConflictingKeysImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {} -Future> ConflictingKeysImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { - Standalone result; +Future ConflictingKeysImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { + RangeResult result; if (ryw->getTransactionInfo().conflictingKeys) { auto krMapPtr = ryw->getTransactionInfo().conflictingKeys.get(); auto beginIter = krMapPtr->rangeContaining(kr.begin); @@ -635,13 +635,13 @@ Future> ConflictingKeysImpl::getRange(ReadYourWritesT return result; } -ACTOR Future> ddMetricsGetRangeActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { +ACTOR Future ddMetricsGetRangeActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { loop { try { auto keys = kr.removePrefix(ddStatsRange.begin); Standalone> resultWithoutPrefix = wait( waitDataDistributionMetricsList(ryw->getDatabase(), keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT)); - Standalone result; + RangeResult result; for (const auto& ddMetricsRef : resultWithoutPrefix) { // each begin key is the previous end key, thus we only encode the begin key in the result KeyRef beginKey = ddMetricsRef.beginKey.withPrefix(ddStatsRange.begin, result.arena()); @@ -669,7 +669,7 @@ ACTOR Future> ddMetricsGetRangeActor(ReadYourWritesTr DDStatsRangeImpl::DDStatsRangeImpl(KeyRangeRef kr) : SpecialKeyRangeAsyncImpl(kr) {} -Future> 
DDStatsRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { +Future DDStatsRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return ddMetricsGetRangeActor(ryw, kr); } @@ -682,9 +682,8 @@ Key SpecialKeySpace::getManagementApiCommandOptionSpecialKey(const std::string& ManagementCommandsOptionsImpl::ManagementCommandsOptionsImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> ManagementCommandsOptionsImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { - Standalone result; +Future ManagementCommandsOptionsImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { + RangeResult result; // Since we only have limit number of options, a brute force loop here is enough for (const auto& option : SpecialKeySpace::getManagementApiOptionsSet()) { auto key = getKeyRange().begin.withSuffix(option); @@ -726,14 +725,12 @@ Future> ManagementCommandsOptionsImpl::commit(ReadYourWrit return Optional(); } -Standalone rywGetRange(ReadYourWritesTransaction* ryw, - const KeyRangeRef& kr, - const Standalone& res) { +RangeResult rywGetRange(ReadYourWritesTransaction* ryw, const KeyRangeRef& kr, const RangeResult& res) { // "res" is the read result regardless of your writes, if ryw disabled, return immediately if (ryw->readYourWritesDisabled()) return res; // If ryw enabled, we update it with writes from the transaction - Standalone result; + RangeResult result; RangeMap>, KeyRangeRef>::Ranges ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(kr); RangeMap>, KeyRangeRef>::iterator iter = ranges.begin(); @@ -778,13 +775,13 @@ Standalone rywGetRange(ReadYourWritesTransaction* ryw, } // read from those readwrite modules in which special keys have one-to-one mapping with real persisted keys -ACTOR Future> rwModuleWithMappingGetRangeActor(ReadYourWritesTransaction* ryw, - const SpecialKeyRangeRWImpl* impl, - KeyRangeRef kr) { - Standalone resultWithoutPrefix = +ACTOR Future 
rwModuleWithMappingGetRangeActor(ReadYourWritesTransaction* ryw, + const SpecialKeyRangeRWImpl* impl, + KeyRangeRef kr) { + RangeResult resultWithoutPrefix = wait(ryw->getTransaction().getRange(ryw->getDatabase()->specialKeySpace->decode(kr), CLIENT_KNOBS->TOO_MANY)); ASSERT(!resultWithoutPrefix.more && resultWithoutPrefix.size() < CLIENT_KNOBS->TOO_MANY); - Standalone result; + RangeResult result; for (const KeyValueRef& kv : resultWithoutPrefix) result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value)); return rywGetRange(ryw, kr, result); @@ -792,8 +789,7 @@ ACTOR Future> rwModuleWithMappingGetRangeActor(ReadYo ExcludeServersRangeImpl::ExcludeServersRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> ExcludeServersRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future ExcludeServersRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return rwModuleWithMappingGetRangeActor(ryw, this, kr); } @@ -1032,8 +1028,7 @@ Future> ExcludeServersRangeImpl::commit(ReadYourWritesTran FailedServersRangeImpl::FailedServersRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> FailedServersRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future FailedServersRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return rwModuleWithMappingGetRangeActor(ryw, this, kr); } @@ -1056,10 +1051,8 @@ Future> FailedServersRangeImpl::commit(ReadYourWritesTrans return excludeCommitActor(ryw, true); } -ACTOR Future> ExclusionInProgressActor(ReadYourWritesTransaction* ryw, - KeyRef prefix, - KeyRangeRef kr) { - state Standalone result; +ACTOR Future ExclusionInProgressActor(ReadYourWritesTransaction* ryw, KeyRef prefix, KeyRangeRef kr) { + state RangeResult result; state Transaction& tr = ryw->getTransaction(); tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS); tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); // necessary? 
@@ -1070,7 +1063,7 @@ ACTOR Future> ExclusionInProgressActor(ReadYourWrites state std::set inProgressExclusion; // Just getting a consistent read version proves that a set of tlogs satisfying the exclusions has completed // recovery Check that there aren't any storage servers with addresses violating the exclusions - state Standalone serverList = wait(tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY)); + state RangeResult serverList = wait(tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY)); ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY); for (auto& s : serverList) { @@ -1115,21 +1108,18 @@ ACTOR Future> ExclusionInProgressActor(ReadYourWrites ExclusionInProgressRangeImpl::ExclusionInProgressRangeImpl(KeyRangeRef kr) : SpecialKeyRangeAsyncImpl(kr) {} -Future> ExclusionInProgressRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future ExclusionInProgressRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return ExclusionInProgressActor(ryw, getKeyRange().begin, kr); } -ACTOR Future> getProcessClassActor(ReadYourWritesTransaction* ryw, - KeyRef prefix, - KeyRangeRef kr) { +ACTOR Future getProcessClassActor(ReadYourWritesTransaction* ryw, KeyRef prefix, KeyRangeRef kr) { vector _workers = wait(getWorkers(&ryw->getTransaction())); auto workers = _workers; // strip const // Note : the sort by string is anti intuition, ex. 
1.1.1.1:11 < 1.1.1.1:5 std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) { return formatIpPort(lhs.address.ip, lhs.address.port) < formatIpPort(rhs.address.ip, rhs.address.port); }); - Standalone result; + RangeResult result; for (auto& w : workers) { // exclude :tls in keys even the network addresss is TLS KeyRef k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port), result.arena())); @@ -1183,8 +1173,7 @@ ACTOR Future> processClassCommitActor(ReadYourWritesTransa ProcessClassRangeImpl::ProcessClassRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> ProcessClassRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future ProcessClassRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return getProcessClassActor(ryw, getKeyRange().begin, kr); } @@ -1237,16 +1226,14 @@ void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& ryw, "setclass", "Clear range operation is meaningless thus forbidden for setclass"); } -ACTOR Future> getProcessClassSourceActor(ReadYourWritesTransaction* ryw, - KeyRef prefix, - KeyRangeRef kr) { +ACTOR Future getProcessClassSourceActor(ReadYourWritesTransaction* ryw, KeyRef prefix, KeyRangeRef kr) { vector _workers = wait(getWorkers(&ryw->getTransaction())); auto workers = _workers; // strip const // Note : the sort by string is anti intuition, ex. 
1.1.1.1:11 < 1.1.1.1:5 std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) { return formatIpPort(lhs.address.ip, lhs.address.port) < formatIpPort(rhs.address.ip, rhs.address.port); }); - Standalone result; + RangeResult result; for (auto& w : workers) { // exclude :tls in keys even the network addresss is TLS Key k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port))); @@ -1262,15 +1249,14 @@ ACTOR Future> getProcessClassSourceActor(ReadYourWrit ProcessClassSourceRangeImpl::ProcessClassSourceRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {} -Future> ProcessClassSourceRangeImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future ProcessClassSourceRangeImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return getProcessClassSourceActor(ryw, getKeyRange().begin, kr); } -ACTOR Future> getLockedKeyActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { +ACTOR Future getLockedKeyActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE); Optional val = wait(ryw->getTransaction().get(databaseLockedKey)); - Standalone result; + RangeResult result; if (val.present()) { result.push_back_deep(result.arena(), KeyValueRef(kr.begin, val.get())); } @@ -1279,13 +1265,13 @@ ACTOR Future> getLockedKeyActor(ReadYourWritesTransac LockDatabaseImpl::LockDatabaseImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> LockDatabaseImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { +Future LockDatabaseImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { // single key range, the queried range should always be the same as the underlying range ASSERT(kr == getKeyRange()); auto lockEntry = ryw->getSpecialKeySpaceWriteMap()[SpecialKeySpace::getManagementApiCommandPrefix("lock")]; if (!ryw->readYourWritesDisabled() && lockEntry.first) { // ryw enabled and we have written to the special key 
- Standalone result; + RangeResult result; if (lockEntry.second.present()) { result.push_back_deep(result.arena(), KeyValueRef(kr.begin, lockEntry.second.get())); } @@ -1336,12 +1322,12 @@ Future> LockDatabaseImpl::commit(ReadYourWritesTransaction } } -ACTOR Future> getConsistencyCheckKeyActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { +ACTOR Future getConsistencyCheckKeyActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE); ryw->getTransaction().setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); Optional val = wait(ryw->getTransaction().get(fdbShouldConsistencyCheckBeSuspended)); bool ccSuspendSetting = val.present() ? BinaryReader::fromStringRef(val.get(), Unversioned()) : false; - Standalone result; + RangeResult result; if (ccSuspendSetting) { result.push_back_deep(result.arena(), KeyValueRef(kr.begin, ValueRef())); } @@ -1350,14 +1336,13 @@ ACTOR Future> getConsistencyCheckKeyActor(ReadYourWri ConsistencyCheckImpl::ConsistencyCheckImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> ConsistencyCheckImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future ConsistencyCheckImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { // single key range, the queried range should always be the same as the underlying range ASSERT(kr == getKeyRange()); auto entry = ryw->getSpecialKeySpaceWriteMap()[SpecialKeySpace::getManagementApiCommandPrefix("consistencycheck")]; if (!ryw->readYourWritesDisabled() && entry.first) { // ryw enabled and we have written to the special key - Standalone result; + RangeResult result; if (entry.second.present()) { result.push_back_deep(result.arena(), KeyValueRef(kr.begin, entry.second.get())); } @@ -1383,8 +1368,8 @@ GlobalConfigImpl::GlobalConfigImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) { // framework within the range specified. 
The special-key-space getrange // function should only be used for informational purposes. All values are // returned as strings regardless of their true type. -Future> GlobalConfigImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { - Standalone result; +Future GlobalConfigImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { + RangeResult result; auto& globalConfig = GlobalConfig::globalConfig(); KeyRangeRef modified = @@ -1428,7 +1413,7 @@ ACTOR Future> globalConfigCommitActor(GlobalConfigImpl* gl // History should only contain three most recent updates. If it currently // has three items, remove the oldest to make room for a new item. - Standalone history = wait(tr.getRange(globalConfigHistoryKeys, CLIENT_KNOBS->TOO_MANY)); + RangeResult history = wait(tr.getRange(globalConfigHistoryKeys, CLIENT_KNOBS->TOO_MANY)); constexpr int kGlobalConfigMaxHistorySize = 3; if (history.size() > kGlobalConfigMaxHistorySize - 1) { for (int i = 0; i < history.size() - (kGlobalConfigMaxHistorySize - 1); ++i) { @@ -1500,8 +1485,8 @@ void GlobalConfigImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) TracingOptionsImpl::TracingOptionsImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> TracingOptionsImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { - Standalone result; +Future TracingOptionsImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { + RangeResult result; for (const auto& option : SpecialKeySpace::getTracingOptions()) { auto key = getKeyRange().begin.withSuffix(option); if (!kr.contains(key)) { @@ -1559,8 +1544,8 @@ void TracingOptionsImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key CoordinatorsImpl::CoordinatorsImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> CoordinatorsImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { - Standalone result; +Future CoordinatorsImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { + 
RangeResult result; KeyRef prefix(getKeyRange().begin); // the constructor of ClusterConnectionFile already checks whether the file is valid auto cs = ClusterConnectionFile(ryw->getDatabase()->getConnectionFile()->getFilename()).getConnectionString(); @@ -1705,9 +1690,8 @@ void CoordinatorsImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) CoordinatorsAutoImpl::CoordinatorsAutoImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {} -ACTOR static Future> CoordinatorsAutoImplActor(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) { - state Standalone res; +ACTOR static Future CoordinatorsAutoImplActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { + state RangeResult res; state std::string autoCoordinatorsKey; state Transaction& tr = ryw->getTransaction(); @@ -1743,18 +1727,16 @@ ACTOR static Future> CoordinatorsAutoImplActor(ReadYo return res; } -Future> CoordinatorsAutoImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future CoordinatorsAutoImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { // single key range, the queried range should always be the same as the underlying range ASSERT(kr == getKeyRange()); return CoordinatorsAutoImplActor(ryw, kr); } -ACTOR static Future> getMinCommitVersionActor(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) { +ACTOR static Future getMinCommitVersionActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) { ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE); Optional val = wait(ryw->getTransaction().get(minRequiredCommitVersionKey)); - Standalone result; + RangeResult result; if (val.present()) { Version minRequiredCommitVersion = BinaryReader::fromStringRef(val.get(), Unversioned()); ValueRef version(result.arena(), boost::lexical_cast(minRequiredCommitVersion)); @@ -1765,13 +1747,13 @@ ACTOR static Future> getMinCommitVersionActor(ReadYou AdvanceVersionImpl::AdvanceVersionImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -Future> 
AdvanceVersionImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { +Future AdvanceVersionImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { // single key range, the queried range should always be the same as the underlying range ASSERT(kr == getKeyRange()); auto entry = ryw->getSpecialKeySpaceWriteMap()[SpecialKeySpace::getManagementApiCommandPrefix("advanceversion")]; if (!ryw->readYourWritesDisabled() && entry.first) { // ryw enabled and we have written to the special key - Standalone result; + RangeResult result; if (entry.second.present()) { result.push_back_deep(result.arena(), KeyValueRef(kr.begin, entry.second.get())); } @@ -1820,10 +1802,10 @@ Future> AdvanceVersionImpl::commit(ReadYourWritesTransacti ClientProfilingImpl::ClientProfilingImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} -ACTOR static Future> ClientProfilingGetRangeActor(ReadYourWritesTransaction* ryw, - KeyRef prefix, - KeyRangeRef kr) { - state Standalone result; +ACTOR static Future ClientProfilingGetRangeActor(ReadYourWritesTransaction* ryw, + KeyRef prefix, + KeyRangeRef kr) { + state RangeResult result; // client_txn_sample_rate state Key sampleRateKey = LiteralStringRef("client_txn_sample_rate").withPrefix(prefix); if (kr.contains(sampleRateKey)) { @@ -1866,7 +1848,7 @@ ACTOR static Future> ClientProfilingGetRangeActor(Rea } // TODO : add limitation on set operation -Future> ClientProfilingImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { +Future ClientProfilingImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return ClientProfilingGetRangeActor(ryw, getKeyRange().begin, kr); } @@ -1930,10 +1912,10 @@ MaintenanceImpl::MaintenanceImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} // we will calculate the remaining time(truncated to integer, the same as fdbcli) and return back as the value // If the zoneId is the special one `ignoreSSFailuresZoneString`, // value will be 0 (same as fdbcli) -ACTOR static 
Future> MaintenanceGetRangeActor(ReadYourWritesTransaction* ryw, - KeyRef prefix, - KeyRangeRef kr) { - state Standalone result; +ACTOR static Future MaintenanceGetRangeActor(ReadYourWritesTransaction* ryw, + KeyRef prefix, + KeyRangeRef kr) { + state RangeResult result; // zoneId ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE); Optional val = wait(ryw->getTransaction().get(healthyZoneKey)); @@ -1955,7 +1937,7 @@ ACTOR static Future> MaintenanceGetRangeActor(ReadYou return rywGetRange(ryw, kr, result); } -Future> MaintenanceImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { +Future MaintenanceImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return MaintenanceGetRangeActor(ryw, getKeyRange().begin, kr); } @@ -2024,10 +2006,10 @@ Future> MaintenanceImpl::commit(ReadYourWritesTransaction* DataDistributionImpl::DataDistributionImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {} // Read the system keys dataDistributionModeKey and rebalanceDDIgnoreKey -ACTOR static Future> DataDistributionGetRangeActor(ReadYourWritesTransaction* ryw, - KeyRef prefix, - KeyRangeRef kr) { - state Standalone result; +ACTOR static Future DataDistributionGetRangeActor(ReadYourWritesTransaction* ryw, + KeyRef prefix, + KeyRangeRef kr) { + state RangeResult result; // dataDistributionModeKey state Key modeKey = LiteralStringRef("mode").withPrefix(prefix); if (kr.contains(modeKey)) { @@ -2055,8 +2037,7 @@ ACTOR static Future> DataDistributionGetRangeActor(Re return rywGetRange(ryw, kr, result); } -Future> DataDistributionImpl::getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr) const { +Future DataDistributionImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const { return DataDistributionGetRangeActor(ryw, getKeyRange().begin, kr); } diff --git a/fdbclient/SpecialKeySpace.actor.h b/fdbclient/SpecialKeySpace.actor.h index 23eb715022..084135bfb6 100644 --- a/fdbclient/SpecialKeySpace.actor.h +++ 
b/fdbclient/SpecialKeySpace.actor.h @@ -36,7 +36,7 @@ class SpecialKeyRangeReadImpl { public: // Each derived class only needs to implement this simple version of getRange - virtual Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const = 0; + virtual Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const = 0; explicit SpecialKeyRangeReadImpl(KeyRangeRef kr) : range(kr) {} KeyRangeRef getKeyRange() const { return range; } @@ -100,28 +100,26 @@ class SpecialKeyRangeAsyncImpl : public SpecialKeyRangeReadImpl { public: explicit SpecialKeyRangeAsyncImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {} - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override = 0; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override = 0; // calling with a cache object to have consistent results if we need to call rpc - Future> getRange(ReadYourWritesTransaction* ryw, - KeyRangeRef kr, - Optional>* cache) const { + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr, Optional* cache) const { return getRangeAsyncActor(this, ryw, kr, cache); } bool isAsync() const override { return true; } - ACTOR static Future> getRangeAsyncActor(const SpecialKeyRangeReadImpl* skrAyncImpl, - ReadYourWritesTransaction* ryw, - KeyRangeRef kr, - Optional>* cache) { + ACTOR static Future getRangeAsyncActor(const SpecialKeyRangeReadImpl* skrAyncImpl, + ReadYourWritesTransaction* ryw, + KeyRangeRef kr, + Optional* cache) { ASSERT(skrAyncImpl->getKeyRange().contains(kr)); ASSERT(cache != nullptr); if (!cache->present()) { // For simplicity, every time we need to cache, we read the whole range // Although sometimes the range can be narrowed, // there is not a general way to do it in complicated scenarios - Standalone result_ = wait(skrAyncImpl->getRange(ryw, skrAyncImpl->getKeyRange())); + RangeResult result_ = wait(skrAyncImpl->getRange(ryw, skrAyncImpl->getKeyRange())); *cache = result_; } const auto& allResults = 
cache->get(); @@ -131,11 +129,11 @@ public: while (end > 0 && allResults[end - 1].key >= kr.end) --end; if (start < end) { - Standalone result = RangeResultRef(allResults.slice(start, end), false); + RangeResult result = RangeResultRef(allResults.slice(start, end), false); result.arena().dependsOn(allResults.arena()); return result; } else - return Standalone(); + return RangeResult(); } }; @@ -166,11 +164,11 @@ public: Future> get(ReadYourWritesTransaction* ryw, const Key& key); - Future> getRange(ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse = false); + Future getRange(ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse = false); void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value); @@ -206,18 +204,18 @@ public: private: ACTOR static Future> getActor(SpecialKeySpace* sks, ReadYourWritesTransaction* ryw, KeyRef key); - ACTOR static Future> checkRYWValid(SpecialKeySpace* sks, - ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse); - ACTOR static Future> getRangeAggregationActor(SpecialKeySpace* sks, - ReadYourWritesTransaction* ryw, - KeySelector begin, - KeySelector end, - GetRangeLimits limits, - bool reverse); + ACTOR static Future checkRYWValid(SpecialKeySpace* sks, + ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse); + ACTOR static Future getRangeAggregationActor(SpecialKeySpace* sks, + ReadYourWritesTransaction* ryw, + KeySelector begin, + KeySelector end, + GetRangeLimits limits, + bool reverse); KeyRangeMap readImpls; KeyRangeMap modules; @@ -238,7 +236,7 @@ private: class SKSCTestImpl : public SpecialKeyRangeRWImpl { public: explicit SKSCTestImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future 
getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; }; @@ -251,31 +249,31 @@ public: class ConflictingKeysImpl : public SpecialKeyRangeReadImpl { public: explicit ConflictingKeysImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; class ReadConflictRangeImpl : public SpecialKeyRangeReadImpl { public: explicit ReadConflictRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; class WriteConflictRangeImpl : public SpecialKeyRangeReadImpl { public: explicit WriteConflictRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; class DDStatsRangeImpl : public SpecialKeyRangeAsyncImpl { public: explicit DDStatsRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; class ManagementCommandsOptionsImpl : public SpecialKeyRangeRWImpl { public: explicit ManagementCommandsOptionsImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override; void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override; void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override; @@ -285,7 +283,7 @@ public: class ExcludeServersRangeImpl : public SpecialKeyRangeRWImpl { public: explicit ExcludeServersRangeImpl(KeyRangeRef kr); - 
Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override; Key decode(const KeyRef& key) const override; Key encode(const KeyRef& key) const override; @@ -295,7 +293,7 @@ public: class FailedServersRangeImpl : public SpecialKeyRangeRWImpl { public: explicit FailedServersRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override; Key decode(const KeyRef& key) const override; Key encode(const KeyRef& key) const override; @@ -305,13 +303,13 @@ public: class ExclusionInProgressRangeImpl : public SpecialKeyRangeAsyncImpl { public: explicit ExclusionInProgressRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; class ProcessClassRangeImpl : public SpecialKeyRangeRWImpl { public: explicit ProcessClassRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override; void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override; @@ -320,27 +318,27 @@ public: class ProcessClassSourceRangeImpl : public SpecialKeyRangeReadImpl { public: explicit ProcessClassSourceRangeImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; 
class LockDatabaseImpl : public SpecialKeyRangeRWImpl { public: explicit LockDatabaseImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; }; class ConsistencyCheckImpl : public SpecialKeyRangeRWImpl { public: explicit ConsistencyCheckImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; }; class GlobalConfigImpl : public SpecialKeyRangeRWImpl { public: explicit GlobalConfigImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override; Future> commit(ReadYourWritesTransaction* ryw) override; void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override; @@ -350,7 +348,7 @@ public: class TracingOptionsImpl : public SpecialKeyRangeRWImpl { public: explicit TracingOptionsImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override; Future> commit(ReadYourWritesTransaction* ryw) override; void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override; @@ -360,7 +358,7 @@ public: class CoordinatorsImpl : public SpecialKeyRangeRWImpl { public: explicit CoordinatorsImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const 
override; Future> commit(ReadYourWritesTransaction* ryw) override; void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override; void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override; @@ -369,20 +367,20 @@ public: class CoordinatorsAutoImpl : public SpecialKeyRangeReadImpl { public: explicit CoordinatorsAutoImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; }; class AdvanceVersionImpl : public SpecialKeyRangeRWImpl { public: explicit AdvanceVersionImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; }; class ClientProfilingImpl : public SpecialKeyRangeRWImpl { public: explicit ClientProfilingImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override; void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override; @@ -391,13 +389,13 @@ public: class MaintenanceImpl : public SpecialKeyRangeRWImpl { public: explicit MaintenanceImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; Future> commit(ReadYourWritesTransaction* ryw) override; }; class DataDistributionImpl : public SpecialKeyRangeRWImpl { public: explicit DataDistributionImpl(KeyRangeRef kr); - Future> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; + Future getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override; 
Future> commit(ReadYourWritesTransaction* ryw) override; }; diff --git a/fdbclient/SystemData.cpp b/fdbclient/SystemData.cpp index 734728d331..7dec6edaa7 100644 --- a/fdbclient/SystemData.cpp +++ b/fdbclient/SystemData.cpp @@ -48,9 +48,7 @@ const Key keyServersKey(const KeyRef& k) { const KeyRef keyServersKey(const KeyRef& k, Arena& arena) { return k.withPrefix(keyServersPrefix, arena); } -const Value keyServersValue(Standalone result, - const std::vector& src, - const std::vector& dest) { +const Value keyServersValue(RangeResult result, const std::vector& src, const std::vector& dest) { if (!CLIENT_KNOBS->TAG_ENCODE_KEY_SERVERS) { BinaryWriter wr(IncludeVersion(ProtocolVersion::withKeyServerValue())); wr << src << dest; @@ -95,7 +93,7 @@ const Value keyServersValue(const std::vector& srcTag, const std::vector result, +void decodeKeyServersValue(RangeResult result, const ValueRef& value, std::vector& src, std::vector& dest, diff --git a/fdbclient/SystemData.h b/fdbclient/SystemData.h index d948ea4da3..fc07e7cb78 100644 --- a/fdbclient/SystemData.h +++ b/fdbclient/SystemData.h @@ -52,12 +52,12 @@ extern const KeyRangeRef keyServersKeys, keyServersKeyServersKeys; extern const KeyRef keyServersPrefix, keyServersEnd, keyServersKeyServersKey; const Key keyServersKey(const KeyRef& k); const KeyRef keyServersKey(const KeyRef& k, Arena& arena); -const Value keyServersValue(Standalone result, +const Value keyServersValue(RangeResult result, const std::vector& src, const std::vector& dest = std::vector()); const Value keyServersValue(const std::vector& srcTag, const std::vector& destTag = std::vector()); // `result` must be the full result of getting serverTagKeys -void decodeKeyServersValue(Standalone result, +void decodeKeyServersValue(RangeResult result, const ValueRef& value, std::vector& src, std::vector& dest, diff --git a/fdbclient/TagThrottle.actor.cpp b/fdbclient/TagThrottle.actor.cpp index c278db116d..76adbb5431 100644 --- a/fdbclient/TagThrottle.actor.cpp +++ 
b/fdbclient/TagThrottle.actor.cpp @@ -179,7 +179,7 @@ ACTOR Future> getThrottledTags(Database db, int lim if (!containsRecommend) { wait(store(reportAuto, getValidAutoEnabled(&tr, db))); } - Standalone throttles = wait(tr.getRange( + RangeResult throttles = wait(tr.getRange( reportAuto ? tagThrottleKeys : KeyRangeRef(tagThrottleKeysPrefix, tagThrottleAutoKeysPrefix), limit)); std::vector results; for (auto throttle : throttles) { @@ -202,7 +202,7 @@ ACTOR Future> getRecommendedTags(Database db, int l return std::vector(); } - Standalone throttles = + RangeResult throttles = wait(tr.getRange(KeyRangeRef(tagThrottleAutoKeysPrefix, tagThrottleKeys.end), limit)); std::vector results; for (auto throttle : throttles) { @@ -339,7 +339,7 @@ ACTOR Future unthrottleMatchingThrottles(Database db, loop { try { - state Standalone tags = wait(tr.getRange(begin, end, 1000)); + state RangeResult tags = wait(tr.getRange(begin, end, 1000)); state uint64_t unthrottledTags = 0; uint64_t manualUnthrottledTags = 0; for (auto tag : tags) { diff --git a/fdbclient/TaskBucket.actor.cpp b/fdbclient/TaskBucket.actor.cpp index 6f0f63a7f0..4e17a1c9f7 100644 --- a/fdbclient/TaskBucket.actor.cpp +++ b/fdbclient/TaskBucket.actor.cpp @@ -243,8 +243,7 @@ public: state Reference task(new Task()); task->key = taskUID; - state Standalone values = - wait(tr->getRange(taskAvailableSpace.range(), CLIENT_KNOBS->TOO_MANY)); + state RangeResult values = wait(tr->getRange(taskAvailableSpace.range(), CLIENT_KNOBS->TOO_MANY)); Version version = wait(tr->getReadVersion()); task->timeoutVersion = version + (uint64_t)(taskBucket->timeout * @@ -602,19 +601,19 @@ public: taskBucket->setOptions(tr); // Check all available priorities for keys - state std::vector>> resultFutures; + state std::vector> resultFutures; for (int pri = 0; pri <= CLIENT_KNOBS->TASKBUCKET_MAX_PRIORITY; ++pri) resultFutures.push_back(tr->getRange(taskBucket->getAvailableSpace(pri).range(), 1)); // If any priority levels have any keys then the 
taskbucket is not empty so return false state int i; for (i = 0; i < resultFutures.size(); ++i) { - Standalone results = wait(resultFutures[i]); + RangeResult results = wait(resultFutures[i]); if (results.size() > 0) return false; } - Standalone values = wait(tr->getRange(taskBucket->timeouts.range(), 1)); + RangeResult values = wait(tr->getRange(taskBucket->timeouts.range(), 1)); if (values.size() > 0) return false; @@ -625,14 +624,14 @@ public: taskBucket->setOptions(tr); // Check all available priorities for emptiness - state std::vector>> resultFutures; + state std::vector> resultFutures; for (int pri = 0; pri <= CLIENT_KNOBS->TASKBUCKET_MAX_PRIORITY; ++pri) resultFutures.push_back(tr->getRange(taskBucket->getAvailableSpace(pri).range(), 1)); // If any priority levels have any keys then return true as the level is 'busy' state int i; for (i = 0; i < resultFutures.size(); ++i) { - Standalone results = wait(resultFutures[i]); + RangeResult results = wait(resultFutures[i]); if (results.size() > 0) return true; } @@ -650,7 +649,7 @@ public: t.append(task->timeoutVersion); t.append(task->key); - Standalone values = wait(tr->getRange(taskBucket->timeouts.range(t), 1)); + RangeResult values = wait(tr->getRange(taskBucket->timeouts.range(t), 1)); if (values.size() > 0) return false; @@ -742,7 +741,7 @@ public: state KeyRange range( KeyRangeRef(taskBucket->timeouts.get(0).range().begin, taskBucket->timeouts.get(end).range().end)); - Standalone values = wait(tr->getRange(range, CLIENT_KNOBS->TASKBUCKET_MAX_TASK_KEYS)); + RangeResult values = wait(tr->getRange(range, CLIENT_KNOBS->TASKBUCKET_MAX_TASK_KEYS)); // Keys will be tuples of (taskUID, param) -> paramValue // Unfortunately we need to know the priority parameter for a taskUID before we can know which available-tasks @@ -793,7 +792,7 @@ public: ACTOR static Future debugPrintRange(Reference tr, Subspace subspace, Key msg) { tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); 
tr->setOption(FDBTransactionOptions::LOCK_AWARE); - Standalone values = wait(tr->getRange(subspace.range(), CLIENT_KNOBS->TOO_MANY)); + RangeResult values = wait(tr->getRange(subspace.range(), CLIENT_KNOBS->TOO_MANY)); TraceEvent("TaskBucketDebugPrintRange") .detail("Key", subspace.key()) .detail("Count", values.size()) @@ -851,7 +850,7 @@ public: } else { TEST(true); // Extended a task without updating parameters // Otherwise, read and transplant the params from the old to new timeout spaces - Standalone params = wait(tr->getRange(oldTimeoutSpace.range(), CLIENT_KNOBS->TOO_MANY)); + RangeResult params = wait(tr->getRange(oldTimeoutSpace.range(), CLIENT_KNOBS->TOO_MANY)); for (auto& kv : params) { Tuple paramKey = oldTimeoutSpace.unpack(kv.key); tr->set(newTimeoutSpace.pack(paramKey), kv.value); @@ -1114,7 +1113,7 @@ public: ACTOR static Future isSet(Reference tr, Reference taskFuture) { taskFuture->futureBucket->setOptions(tr); - Standalone values = wait(tr->getRange(taskFuture->blocks.range(), 1)); + RangeResult values = wait(tr->getRange(taskFuture->blocks.range(), 1)); if (values.size() > 0) return false; @@ -1177,7 +1176,7 @@ public: Reference taskFuture) { taskFuture->futureBucket->setOptions(tr); - Standalone values = wait(tr->getRange(taskFuture->callbacks.range(), CLIENT_KNOBS->TOO_MANY)); + RangeResult values = wait(tr->getRange(taskFuture->callbacks.range(), CLIENT_KNOBS->TOO_MANY)); tr->clear(taskFuture->callbacks.range()); std::vector> actions; diff --git a/fdbclient/ThreadSafeTransaction.cpp b/fdbclient/ThreadSafeTransaction.cpp index b8f2bc6a0a..eb5d1e17e2 100644 --- a/fdbclient/ThreadSafeTransaction.cpp +++ b/fdbclient/ThreadSafeTransaction.cpp @@ -152,6 +152,12 @@ ThreadSafeTransaction::ThreadSafeTransaction(DatabaseContext* cx) { nullptr); } +// This constructor is only used while refactoring fdbcli and only called from the main thread +ThreadSafeTransaction::ThreadSafeTransaction(ReadYourWritesTransaction* ryw) : tr(ryw) { + if (tr) + 
tr->addref(); +} + ThreadSafeTransaction::~ThreadSafeTransaction() { ReadYourWritesTransaction* tr = this->tr; if (tr) @@ -217,31 +223,31 @@ ThreadFuture>> ThreadSafeTransaction::getRangeSplit }); } -ThreadFuture> ThreadSafeTransaction::getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot, - bool reverse) { +ThreadFuture ThreadSafeTransaction::getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot, + bool reverse) { KeySelector b = begin; KeySelector e = end; ReadYourWritesTransaction* tr = this->tr; - return onMainThread([tr, b, e, limit, snapshot, reverse]() -> Future> { + return onMainThread([tr, b, e, limit, snapshot, reverse]() -> Future { tr->checkDeferredError(); return tr->getRange(b, e, limit, snapshot, reverse); }); } -ThreadFuture> ThreadSafeTransaction::getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot, - bool reverse) { +ThreadFuture ThreadSafeTransaction::getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot, + bool reverse) { KeySelector b = begin; KeySelector e = end; ReadYourWritesTransaction* tr = this->tr; - return onMainThread([tr, b, e, limits, snapshot, reverse]() -> Future> { + return onMainThread([tr, b, e, limits, snapshot, reverse]() -> Future { tr->checkDeferredError(); return tr->getRange(b, e, limits, snapshot, reverse); }); diff --git a/fdbclient/ThreadSafeTransaction.h b/fdbclient/ThreadSafeTransaction.h index d8502f7613..8f00fd077c 100644 --- a/fdbclient/ThreadSafeTransaction.h +++ b/fdbclient/ThreadSafeTransaction.h @@ -73,32 +73,35 @@ public: explicit ThreadSafeTransaction(DatabaseContext* cx); ~ThreadSafeTransaction() override; + // Note: used while refactoring fdbcli, need to be removed later + explicit ThreadSafeTransaction(ReadYourWritesTransaction* ryw); + void cancel() override; void setVersion(Version v) override; ThreadFuture 
getReadVersion() override; ThreadFuture> get(const KeyRef& key, bool snapshot = false) override; ThreadFuture getKey(const KeySelectorRef& key, bool snapshot = false) override; - ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - int limit, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeySelectorRef& begin, - const KeySelectorRef& end, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) override; - ThreadFuture> getRange(const KeyRangeRef& keys, - int limit, - bool snapshot = false, - bool reverse = false) override { + ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + int limit, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeySelectorRef& begin, + const KeySelectorRef& end, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) override; + ThreadFuture getRange(const KeyRangeRef& keys, + int limit, + bool snapshot = false, + bool reverse = false) override { return getRange(firstGreaterOrEqual(keys.begin), firstGreaterOrEqual(keys.end), limit, snapshot, reverse); } - ThreadFuture> getRange(const KeyRangeRef& keys, - GetRangeLimits limits, - bool snapshot = false, - bool reverse = false) override { + ThreadFuture getRange(const KeyRangeRef& keys, + GetRangeLimits limits, + bool snapshot = false, + bool reverse = false) override { return getRange(firstGreaterOrEqual(keys.begin), firstGreaterOrEqual(keys.end), limits, snapshot, reverse); } ThreadFuture>> getAddressesForKey(const KeyRef& key) override; diff --git a/fdbclient/vexillographer/fdb.options b/fdbclient/vexillographer/fdb.options index c6a4a9749c..2d3a5b57ce 100644 --- a/fdbclient/vexillographer/fdb.options +++ b/fdbclient/vexillographer/fdb.options @@ -192,6 +192,9 @@ description is not currently required but encouraged. description="Enable tracing for all transactions. This is the default." />