Merge remote-tracking branch 'origin/master' into config-db
commit a775f92fca
@@ -7,7 +7,7 @@ bindings/java/foundationdb-client*.jar
 bindings/java/foundationdb-tests*.jar
 bindings/java/fdb-java-*-sources.jar
 packaging/msi/FDBInstaller.msi
-
+builds/
 # Generated source, build, and packaging files
 *.g.cpp
 *.g.h

@@ -18,7 +18,7 @@
 # limitations under the License.
 cmake_minimum_required(VERSION 3.13)
 project(foundationdb
-  VERSION 7.0.0
+  VERSION 7.1.0
   DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
   HOMEPAGE_URL "http://www.foundationdb.org/"
   LANGUAGES C CXX ASM)

@@ -36,7 +36,7 @@ Members of the Apple FoundationDB team are part of the core committers helping r

 ## Contributing
 ### Opening a Pull Request
-We love pull requests! For minor changes, feel free to open up a PR directly. For larger feature development and any changes that may require community discussion, we ask that you discuss your ideas on the [community forums](https://forums.foundationdb.org) prior to opening a PR, and then reference that thread within your PR comment.
+We love pull requests! For minor changes, feel free to open up a PR directly. For larger feature development and any changes that may require community discussion, we ask that you discuss your ideas on the [community forums](https://forums.foundationdb.org) prior to opening a PR, and then reference that thread within your PR comment. Please refer to [FoundationDB Commit Process](https://github.com/apple/foundationdb/wiki/FoundationDB-Commit-Process) for more detailed guidelines.

 CI will be run automatically for core committers, and for community PRs it will be initiated by the request of a core committer. Tests can also be run locally via `ctest`, and core committers can run additional validation on pull requests prior to merging them.

@@ -157,11 +157,11 @@ The build under MacOS will work the same way as on Linux. To get boost and ninja
 cmake -G Ninja <PATH_TO_FOUNDATIONDB_SOURCE>
 ```

-To generate a installable package, you can use cpack:
+To generate an installable package:

 ```sh
 ninja
-cpack -G productbuild
+$SRCDIR/packaging/osx/buildpkg.sh . $SRCDIR
 ```

 ### Windows

@@ -171,7 +171,7 @@ that Visual Studio is used to compile.

 1. Install Visual Studio 2017 (Community Edition is tested)
 1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
-1. Download version 1.72 of [Boost](https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2)
+1. Download version 1.72 of [Boost](https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2)
 1. Unpack boost (you don't need to compile it)
 1. Install [Mono](http://www.mono-project.com/download/stable/)
 1. (Optional) Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8

@@ -1,6 +1,6 @@
-add_subdirectory(c)
 if(NOT OPEN_FOR_IDE)
   # flow bindings currently doesn't support that
+  add_subdirectory(c)
   add_subdirectory(flow)
 endif()
 add_subdirectory(python)

@@ -26,7 +26,7 @@ sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings',

 import util

-FDB_API_VERSION = 700
+FDB_API_VERSION = 710

 LOGGING = {
     'version': 1,

@@ -157,7 +157,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
         api_version = min_version
     elif random.random() < 0.9:
         api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
-                                                 440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700] if v >= min_version and v <= max_version])
+                                                 440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710] if v >= min_version and v <= max_version])
     else:
         api_version = random.randint(min_version, max_version)

@@ -20,7 +20,7 @@

 import os

-MAX_API_VERSION = 700
+MAX_API_VERSION = 710
 COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
 ALL_TYPES = COMMON_TYPES + ['versionstamp']

@@ -34,7 +34,7 @@ fdb.api_version(FDB_API_VERSION)

 class ScriptedTest(Test):
-    TEST_API_VERSION = 700
+    TEST_API_VERSION = 710

     def __init__(self, subspace):
         super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)

@@ -19,10 +19,11 @@
 */

-#define FDB_API_VERSION 700
+#include <cstdint>
+#define FDB_API_VERSION 710
 #define FDB_INCLUDE_LEGACY_TYPES

 #include "fdbclient/MultiVersionTransaction.h"
 #include "fdbclient/MultiVersionAssignmentVars.h"
 #include "foundationdb/fdb_c.h"

 int g_api_version = 0;

@@ -364,6 +365,22 @@ extern "C" DLLEXPORT double fdb_database_get_main_thread_busyness(FDBDatabase* d
     return DB(d)->getMainThreadBusyness();
 }

+// Returns the protocol version reported by the coordinator this client is connected to
+// If an expected version is non-zero, the future won't return until the protocol version is different than expected
+// Note: this will never return if the server is running a protocol from FDB 5.0 or older
+extern "C" DLLEXPORT FDBFuture* fdb_database_get_server_protocol(FDBDatabase* db, uint64_t expected_version) {
+    Optional<ProtocolVersion> expected;
+    if (expected_version > 0) {
+        expected = ProtocolVersion(expected_version);
+    }
+
+    return (
+        FDBFuture*)(mapThreadFuture<ProtocolVersion,
+                                    uint64_t>(DB(db)->getServerProtocol(expected), [](ErrorOr<ProtocolVersion> result) {
+        return result.map<uint64_t>([](ProtocolVersion pv) { return pv.versionWithFlags(); });
+    }).extractPtr());
+}
+
 extern "C" DLLEXPORT void fdb_transaction_destroy(FDBTransaction* tr) {
     try {
         TXN(tr)->delref();
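As a usage sketch (not part of this diff): a caller might combine the zero and non-zero forms of the new entry point to watch for protocol-version changes, assuming a valid `FDBDatabase*` and a running network thread; the helper name below is hypothetical and error handling is elided.

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

/* Hedged sketch: read the current protocol version, then block until the
 * coordinator reports a different one. */
uint64_t wait_for_protocol_change(FDBDatabase* db) {
    uint64_t current = 0;
    FDBFuture* f = fdb_database_get_server_protocol(db, 0); /* 0: resolve immediately */
    fdb_future_block_until_ready(f);
    fdb_future_get_uint64(f, &current);
    fdb_future_destroy(f);

    /* With a non-zero expected version, the future resolves only once the
     * coordinator's protocol version differs from `current`. */
    uint64_t next = 0;
    f = fdb_database_get_server_protocol(db, current);
    fdb_future_block_until_ready(f);
    fdb_future_get_uint64(f, &next);
    fdb_future_destroy(f);
    return next;
}
```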
@@ -583,10 +600,6 @@ extern "C" DLLEXPORT FDBFuture* fdb_transaction_get_approximate_size(FDBTransact
     return (FDBFuture*)TXN(tr)->getApproximateSize().extractPtr();
 }

-extern "C" DLLEXPORT FDBFuture* fdb_get_server_protocol(const char* clusterFilePath) {
-    return (FDBFuture*)(API->getServerProtocol(clusterFilePath ? clusterFilePath : "").extractPtr());
-}
-
 extern "C" DLLEXPORT FDBFuture* fdb_transaction_get_versionstamp(FDBTransaction* tr) {
     return (FDBFuture*)(TXN(tr)->getVersionstamp().extractPtr());
 }

@@ -27,10 +27,10 @@
 #endif

 #if !defined(FDB_API_VERSION)
-#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 700)
+#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 710)
 #elif FDB_API_VERSION < 13
 #error API version no longer supported (upgrade to 13)
-#elif FDB_API_VERSION > 700
+#elif FDB_API_VERSION > 710
 #error Requested API version requires a newer version of this header
 #endif
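For orientation: the gate above is the compile-time half of API selection; at runtime the application must also select a version no newer than either the header or the loaded client library. A hedged sketch of that handshake (the helper name is hypothetical):

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

/* Hedged sketch: pick the newest API version supported by both this
 * compilation unit (FDB_API_VERSION) and the client library loaded at runtime. */
static fdb_error_t select_best_api_version(void) {
    int v = fdb_get_max_api_version();
    if (v > FDB_API_VERSION) {
        v = FDB_API_VERSION; /* never request more than we compiled against */
    }
    return fdb_select_api_version(v); /* returns 0 on success */
}
```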
@@ -97,7 +97,7 @@ typedef struct key {
     const uint8_t* key;
     int key_length;
 } FDBKey;
-#if FDB_API_VERSION >= 700
+#if FDB_API_VERSION >= 710
 typedef struct keyvalue {
     const uint8_t* key;
     int key_length;

@@ -189,6 +189,8 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_create_snapshot(FDBDatabase

 DLLEXPORT WARN_UNUSED_RESULT double fdb_database_get_main_thread_busyness(FDBDatabase* db);

+DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_get_server_protocol(FDBDatabase* db, uint64_t expected_version);
+
 DLLEXPORT void fdb_transaction_destroy(FDBTransaction* tr);

 DLLEXPORT void fdb_transaction_cancel(FDBTransaction* tr);

@@ -281,8 +283,6 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_transaction_get_committed_version(F
 */
 DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_approximate_size(FDBTransaction* tr);

-DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_get_server_protocol(const char* clusterFilePath);
-
 DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_versionstamp(FDBTransaction* tr);

 DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_on_error(FDBTransaction* tr, fdb_error_t error);

@@ -1,9 +1,9 @@
-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>

 int main(int argc, char* argv[]) {
     (void)argc;
     (void)argv;
-    fdb_select_api_version(700);
+    fdb_select_api_version(710);
     return 0;
 }

@@ -1172,6 +1172,14 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
 #endif
     }

+    /* Set client Log group */
+    if (strlen(args->log_group) != 0) {
+        err = fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group));
+        if (err) {
+            fprintf(stderr, "ERROR: fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP): %s\n", fdb_get_error(err));
+        }
+    }
+
     /* enable tracing if specified */
     if (args->trace) {
         fprintf(debugme,

@@ -1345,6 +1353,7 @@ int init_args(mako_args_t* args) {
     args->verbose = 1;
     args->flatbuffers = 0; /* internal */
     args->knobs[0] = '\0';
+    args->log_group[0] = '\0';
     args->trace = 0;
     args->tracepath[0] = '\0';
     args->traceformat = 0; /* default to client's default (XML) */

@@ -1505,6 +1514,7 @@ void usage() {
     printf("%-24s %s\n", "-m, --mode=MODE", "Specify the mode (build, run, clean)");
     printf("%-24s %s\n", "-z, --zipf", "Use zipfian distribution instead of uniform distribution");
     printf("%-24s %s\n", "    --commitget", "Commit GETs");
+    printf("%-24s %s\n", "    --loggroup=LOGGROUP", "Set client log group");
     printf("%-24s %s\n", "    --trace", "Enable tracing");
     printf("%-24s %s\n", "    --tracepath=PATH", "Set trace file path");
     printf("%-24s %s\n", "    --trace_format <xml|json>", "Set trace format (Default: json)");

@@ -1546,6 +1556,7 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
     { "verbose", required_argument, NULL, 'v' },
     { "mode", required_argument, NULL, 'm' },
     { "knobs", required_argument, NULL, ARG_KNOBS },
+    { "loggroup", required_argument, NULL, ARG_LOGGROUP },
     { "tracepath", required_argument, NULL, ARG_TRACEPATH },
     { "trace_format", required_argument, NULL, ARG_TRACEFORMAT },
     { "streaming", required_argument, NULL, ARG_STREAMING_MODE },

@@ -1656,6 +1667,9 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
     case ARG_KNOBS:
         memcpy(args->knobs, optarg, strlen(optarg) + 1);
         break;
+    case ARG_LOGGROUP:
+        memcpy(args->log_group, optarg, strlen(optarg) + 1);
+        break;
     case ARG_TRACE:
         args->trace = 1;
         break;

@@ -3,7 +3,7 @@
 #pragma once

 #ifndef FDB_API_VERSION
-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #endif

 #include <foundationdb/fdb_c.h>

@@ -68,6 +68,7 @@ enum Arguments {
     ARG_VERSION,
     ARG_KNOBS,
     ARG_FLATBUFFERS,
+    ARG_LOGGROUP,
     ARG_TRACE,
     ARG_TRACEPATH,
     ARG_TRACEFORMAT,

@@ -97,6 +98,7 @@ typedef struct {
     int ops[MAX_OP][3];
 } mako_txnspec_t;

+#define LOGGROUP_MAX 256
 #define KNOB_MAX 256
 #define TAGPREFIXLENGTH_MAX 8

@@ -122,6 +124,7 @@ typedef struct {
     int verbose;
     mako_txnspec_t txnspec;
     char cluster_file[PATH_MAX];
+    char log_group[LOGGROUP_MAX];
     int trace;
     char tracepath[PATH_MAX];
     int traceformat; /* 0 - XML, 1 - JSON */

@@ -641,7 +641,7 @@ void runTests(struct ResultSet* rs) {
 int main(int argc, char** argv) {
     srand(time(NULL));
     struct ResultSet* rs = newResultSet();
-    checkError(fdb_select_api_version(700), "select API version", rs);
+    checkError(fdb_select_api_version(710), "select API version", rs);
     printf("Running performance test at client version: %s\n", fdb_get_client_version());

     valueStr = (uint8_t*)malloc((sizeof(uint8_t)) * valueSize);

@@ -285,7 +285,7 @@ void runTests(struct ResultSet* rs) {
 int main(int argc, char** argv) {
     srand(time(NULL));
     struct ResultSet* rs = newResultSet();
-    checkError(fdb_select_api_version(700), "select API version", rs);
+    checkError(fdb_select_api_version(710), "select API version", rs);
     printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());

     keys = generateKeys(numKeys, keySize);

@@ -29,7 +29,7 @@
 #include <inttypes.h>

 #ifndef FDB_API_VERSION
-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #endif

 #include <foundationdb/fdb_c.h>

@@ -97,7 +97,7 @@ void runTests(struct ResultSet* rs) {
 int main(int argc, char** argv) {
     srand(time(NULL));
     struct ResultSet* rs = newResultSet();
-    checkError(fdb_select_api_version(700), "select API version", rs);
+    checkError(fdb_select_api_version(710), "select API version", rs);
     printf("Running performance test at client version: %s\n", fdb_get_client_version());

     keys = generateKeys(numKeys, KEY_SIZE);

@@ -39,7 +39,7 @@

 #pragma once

-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>

 #include <string>

@@ -20,7 +20,7 @@

 // Unit tests for API setup, network initialization functions from the FDB C API.

-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>
 #include <iostream>
 #include <thread>

@@ -42,13 +42,13 @@ TEST_CASE("setup") {
     CHECK(err);

     // Select current API version
-    fdb_check(fdb_select_api_version(700));
+    fdb_check(fdb_select_api_version(710));

     // Error to call again after a successful return
-    err = fdb_select_api_version(700);
+    err = fdb_select_api_version(710);
     CHECK(err);

-    CHECK(fdb_get_max_api_version() >= 700);
+    CHECK(fdb_get_max_api_version() >= 710);

     fdb_check(fdb_setup_network());
     // Calling a second time should fail

@@ -20,7 +20,7 @@

 // Unit tests for the FoundationDB C API.

-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>
 #include <assert.h>
 #include <string.h>

@@ -263,13 +263,15 @@ TEST_CASE("fdb_future_set_callback") {
                                       &context));

         fdb_error_t err = wait_future(f1);

-        context.event.wait(); // Wait until callback is called
         if (err) {
             fdb::EmptyFuture f2 = tr.on_error(err);
             fdb_check(wait_future(f2));
             continue;
         }

+        context.event.wait(); // Wait until callback is called
+
         break;
     }
 }

@@ -515,10 +517,10 @@ TEST_CASE("write system key") {
     fdb::Transaction tr(db);

     std::string syskey("\xff\x02");
-    fdb_check(tr.set_option(FDB_TR_OPTION_ACCESS_SYSTEM_KEYS, nullptr, 0));
-    tr.set(syskey, "bar");

     while (1) {
+        fdb_check(tr.set_option(FDB_TR_OPTION_ACCESS_SYSTEM_KEYS, nullptr, 0));
+        tr.set(syskey, "bar");
         fdb::EmptyFuture f1 = tr.commit();

         fdb_error_t err = wait_future(f1);

@@ -949,16 +951,25 @@ TEST_CASE("fdb_transaction_clear") {
 }

 TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_ADD") {
-    insert_data(db, create_data({ { "foo", "a" } }));
+    insert_data(db, create_data({ { "foo", "\x00" } }));

     fdb::Transaction tr(db);
     int8_t param = 1;
+    int potentialCommitCount = 0;
     while (1) {
         tr.atomic_op(key("foo"), (const uint8_t*)&param, sizeof(param), FDB_MUTATION_TYPE_ADD);
+        if (potentialCommitCount + 1 == 256) {
+            // Trying to commit again might overflow the one unsigned byte we're looking at
+            break;
+        }
+        ++potentialCommitCount;
         fdb::EmptyFuture f1 = tr.commit();

         fdb_error_t err = wait_future(f1);
         if (err) {
+            if (fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE_NOT_COMMITTED, err)) {
+                --potentialCommitCount;
+            }
             fdb::EmptyFuture f2 = tr.on_error(err);
             fdb_check(wait_future(f2));
             continue;

@@ -969,7 +980,8 @@ TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_ADD") {
     auto value = get_value(key("foo"), /* snapshot */ false, {});
     REQUIRE(value.has_value());
     CHECK(value->size() == 1);
-    CHECK(value->data()[0] == 'b'); // incrementing 'a' results in 'b'
+    CHECK(uint8_t(value->data()[0]) > 0);
+    CHECK(uint8_t(value->data()[0]) <= potentialCommitCount);
 }

 TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_BIT_AND") {
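The `potentialCommitCount` bookkeeping introduced above exists because a failed `fdb_transaction_commit()` does not always mean the transaction failed to commit: only errors matching `FDB_ERROR_PREDICATE_RETRYABLE_NOT_COMMITTED` guarantee it was not applied. A hedged sketch of the same pattern outside the test harness (the function name is hypothetical):

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

/* Hedged sketch: retry a commit while counting every attempt that might
 * have been applied; un-count only when the error proves it was not
 * committed. On success the return value is an upper bound on how many
 * times the mutations may have been applied. */
int commit_counting_attempts(FDBTransaction* tr) {
    int potential_commits = 0;
    while (1) {
        /* ... stage reads/mutations on tr here ... */
        ++potential_commits;
        FDBFuture* f = fdb_transaction_commit(tr);
        fdb_future_block_until_ready(f);
        fdb_error_t err = fdb_future_get_error(f);
        fdb_future_destroy(f);
        if (!err)
            return potential_commits;
        if (fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE_NOT_COMMITTED, err))
            --potential_commits; /* definitely not committed; safe to un-count */
        FDBFuture* on_err = fdb_transaction_on_error(tr, err);
        fdb_future_block_until_ready(on_err);
        fdb_error_t retry_err = fdb_future_get_error(on_err);
        fdb_future_destroy(on_err);
        if (retry_err)
            return -1; /* not retryable; give up */
    }
}
```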
@@ -1139,14 +1151,19 @@ TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_BIT_XOR") {

     fdb::Transaction tr(db);
     char param[] = { 'a', 'd' };
+    int potentialCommitCount = 0;
     while (1) {
         tr.atomic_op(key("foo"), (const uint8_t*)"b", 1, FDB_MUTATION_TYPE_BIT_XOR);
         tr.atomic_op(key("bar"), (const uint8_t*)param, 2, FDB_MUTATION_TYPE_BIT_XOR);
         tr.atomic_op(key("baz"), (const uint8_t*)"d", 1, FDB_MUTATION_TYPE_BIT_XOR);
+        ++potentialCommitCount;
         fdb::EmptyFuture f1 = tr.commit();

         fdb_error_t err = wait_future(f1);
         if (err) {
+            if (fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE_NOT_COMMITTED, err)) {
+                --potentialCommitCount;
+            }
             fdb::EmptyFuture f2 = tr.on_error(err);
             fdb_check(wait_future(f2));
             continue;

@@ -1154,6 +1171,11 @@ TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_BIT_XOR") {
         break;
     }

+    if (potentialCommitCount != 1) {
+        MESSAGE("Transaction may not have committed exactly once. Suppressing assertions");
+        return;
+    }
+
     auto value = get_value(key("foo"), /* snapshot */ false, {});
     REQUIRE(value.has_value());
     CHECK(value->size() == 1);

@@ -1204,13 +1226,18 @@ TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_APPEND_IF_FITS") {
     insert_data(db, create_data({ { "foo", "f" } }));

     fdb::Transaction tr(db);
+    int potentialCommitCount = 0;
     while (1) {
         tr.atomic_op(key("foo"), (const uint8_t*)"db", 2, FDB_MUTATION_TYPE_APPEND_IF_FITS);
         tr.atomic_op(key("bar"), (const uint8_t*)"foundation", 10, FDB_MUTATION_TYPE_APPEND_IF_FITS);
+        ++potentialCommitCount;
         fdb::EmptyFuture f1 = tr.commit();

         fdb_error_t err = wait_future(f1);
         if (err) {
+            if (fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE_NOT_COMMITTED, err)) {
+                --potentialCommitCount;
+            }
             fdb::EmptyFuture f2 = tr.on_error(err);
             fdb_check(wait_future(f2));
             continue;

@@ -1218,13 +1245,18 @@ TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_APPEND_IF_FITS") {
         break;
     }

-    auto value = get_value(key("foo"), /* snapshot */ false, {});
-    REQUIRE(value.has_value());
-    CHECK(value->compare("fdb") == 0);
+    auto value_foo = get_value(key("foo"), /* snapshot */ false, {});
+    REQUIRE(value_foo.has_value());

-    value = get_value(key("bar"), /* snapshot */ false, {});
-    REQUIRE(value.has_value());
-    CHECK(value->compare("foundation") == 0);
+    auto value_bar = get_value(key("bar"), /* snapshot */ false, {});
+    REQUIRE(value_bar.has_value());
+
+    if (potentialCommitCount != 1) {
+        MESSAGE("Transaction may not have committed exactly once. Suppressing assertions");
+    } else {
+        CHECK(value_foo.value() == "fdb");
+        CHECK(value_bar.value() == "foundation");
+    }
 }

 TEST_CASE("fdb_transaction_atomic_op FDB_MUTATION_TYPE_MAX") {

@@ -1513,17 +1545,17 @@ TEST_CASE("fdb_transaction_get_approximate_size") {
     }
 }

-TEST_CASE("fdb_get_server_protocol") {
+TEST_CASE("fdb_database_get_server_protocol") {
     // We don't really have any expectations other than "don't crash" here
-    FDBFuture* protocolFuture = fdb_get_server_protocol(clusterFilePath.c_str());
+    FDBFuture* protocolFuture = fdb_database_get_server_protocol(db, 0);
     uint64_t out;

     fdb_check(fdb_future_block_until_ready(protocolFuture));
     fdb_check(fdb_future_get_uint64(protocolFuture, &out));
     fdb_future_destroy(protocolFuture);

-    // "Default" cluster file version
-    protocolFuture = fdb_get_server_protocol(nullptr);
+    // Passing in an expected version that's different than the cluster version
+    protocolFuture = fdb_database_get_server_protocol(db, 0x0FDB00A200090000LL);
     fdb_check(fdb_future_block_until_ready(protocolFuture));
     fdb_check(fdb_future_get_uint64(protocolFuture, &out));
     fdb_future_destroy(protocolFuture);

@@ -1576,7 +1608,7 @@ TEST_CASE("fdb_transaction_watch max watches") {
     fdb_check(f1.set_callback(
         +[](FDBFuture* f, void* param) {
             fdb_error_t err = fdb_future_get_error(f);
-            if (err != 1101) { // operation_cancelled
+            if (err != /*operation_cancelled*/ 1101 && !fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err)) {
                 CHECK(err == 1032); // too_many_watches
             }
             auto* event = static_cast<std::shared_ptr<FdbEvent>*>(param);

@@ -1587,7 +1619,7 @@ TEST_CASE("fdb_transaction_watch max watches") {
     fdb_check(f2.set_callback(
         +[](FDBFuture* f, void* param) {
             fdb_error_t err = fdb_future_get_error(f);
-            if (err != 1101) { // operation_cancelled
+            if (err != /*operation_cancelled*/ 1101 && !fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err)) {
                 CHECK(err == 1032); // too_many_watches
             }
             auto* event = static_cast<std::shared_ptr<FdbEvent>*>(param);

@@ -1598,7 +1630,7 @@ TEST_CASE("fdb_transaction_watch max watches") {
     fdb_check(f3.set_callback(
         +[](FDBFuture* f, void* param) {
             fdb_error_t err = fdb_future_get_error(f);
-            if (err != 1101) { // operation_cancelled
+            if (err != /*operation_cancelled*/ 1101 && !fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err)) {
                 CHECK(err == 1032); // too_many_watches
             }
             auto* event = static_cast<std::shared_ptr<FdbEvent>*>(param);

@@ -1609,7 +1641,7 @@ TEST_CASE("fdb_transaction_watch max watches") {
     fdb_check(f4.set_callback(
         +[](FDBFuture* f, void* param) {
             fdb_error_t err = fdb_future_get_error(f);
-            if (err != 1101) { // operation_cancelled
+            if (err != /*operation_cancelled*/ 1101 && !fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err)) {
                 CHECK(err == 1032); // too_many_watches
             }
             auto* event = static_cast<std::shared_ptr<FdbEvent>*>(param);

@@ -1671,7 +1703,7 @@ TEST_CASE("fdb_transaction_cancel") {
     // ... until the transaction has been reset.
     tr.reset();
     fdb::ValueFuture f2 = tr.get("foo", /* snapshot */ false);
-    fdb_check(wait_future(f2));
+    CHECK(wait_future(f2) != 1025); // transaction_cancelled
 }

 TEST_CASE("fdb_transaction_add_conflict_range") {

@@ -2146,22 +2178,29 @@ TEST_CASE("monitor_network_busyness") {
 }

 int main(int argc, char** argv) {
-    if (argc != 3 && argc != 4) {
+    if (argc < 3) {
         std::cout << "Unit tests for the FoundationDB C API.\n"
-                  << "Usage: fdb_c_unit_tests /path/to/cluster_file key_prefix [externalClient]" << std::endl;
+                  << "Usage: fdb_c_unit_tests /path/to/cluster_file key_prefix [externalClient] [doctest args]"
+                  << std::endl;
         return 1;
     }
-    fdb_check(fdb_select_api_version(700));
-    if (argc == 4) {
+    fdb_check(fdb_select_api_version(710));
+    if (argc >= 4) {
         std::string externalClientLibrary = argv[3];
+        if (externalClientLibrary.substr(0, 2) != "--") {
             fdb_check(fdb_network_set_option(
                 FDBNetworkOption::FDB_NET_OPTION_DISABLE_LOCAL_CLIENT, reinterpret_cast<const uint8_t*>(""), 0));
             fdb_check(fdb_network_set_option(FDBNetworkOption::FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY,
                                              reinterpret_cast<const uint8_t*>(externalClientLibrary.c_str()),
                                              externalClientLibrary.size()));
+        }
     }

+    /* fdb_check(fdb_network_set_option( */
+    /*     FDBNetworkOption::FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE, reinterpret_cast<const uint8_t*>(""), 0)); */
+
+    doctest::Context context;
+    context.applyCommandLine(argc, argv);
+
     fdb_check(fdb_setup_network());
     std::thread network_thread{ &fdb_run_network };
@@ -18,7 +18,7 @@
  * limitations under the License.
  */

-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include "foundationdb/fdb_c.h"
 #undef DLLEXPORT
 #include "workloads.h"

@@ -266,7 +266,7 @@ struct SimpleWorkload : FDBWorkload {
         insertsPerTx = context->getOption("insertsPerTx", 100ul);
         opsPerTx = context->getOption("opsPerTx", 100ul);
         runFor = context->getOption("runFor", 10.0);
-        auto err = fdb_select_api_version(700);
+        auto err = fdb_select_api_version(710);
         if (err) {
             context->trace(
                 FDBSeverity::Info, "SelectAPIVersionFailed", { { "Error", std::string(fdb_get_error(err)) } });

@@ -37,7 +37,7 @@ THREAD_FUNC networkThread(void* fdb) {
 }

 ACTOR Future<Void> _test() {
-    API* fdb = FDB::API::selectAPIVersion(700);
+    API* fdb = FDB::API::selectAPIVersion(710);
     auto db = fdb->createDatabase();
     state Reference<Transaction> tr = db->createTransaction();

@@ -81,7 +81,7 @@ ACTOR Future<Void> _test() {
 }

 void fdb_flow_test() {
-    API* fdb = FDB::API::selectAPIVersion(700);
+    API* fdb = FDB::API::selectAPIVersion(710);
     fdb->setupNetwork();
     startThread(networkThread, fdb);

@@ -23,7 +23,7 @@

 #include <flow/flow.h>

-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <bindings/c/foundationdb/fdb_c.h>
 #undef DLLEXPORT

@@ -1863,7 +1863,7 @@ ACTOR void _test_versionstamp() {
     try {
         g_network = newNet2(TLSConfig());

-        API* fdb = FDB::API::selectAPIVersion(700);
+        API* fdb = FDB::API::selectAPIVersion(710);

         fdb->setupNetwork();
         startThread(networkThread, fdb);
@@ -9,7 +9,7 @@ This package requires:
 - [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
 - FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))

-Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-700.
+Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-710.

 To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater):

@@ -22,7 +22,7 @@

 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -22,7 +22,7 @@

 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below:

 	func main() {
 		// Different API versions may expose different runtime behaviors.
-		fdb.MustAPIVersion(700)
+		fdb.MustAPIVersion(710)

 		// Open the default database from the system cluster
 		db := fdb.MustOpenDefault()

@@ -22,7 +22,7 @@

 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -22,7 +22,7 @@

 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 // #include <stdlib.h>
 import "C"

@@ -108,7 +108,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
 // library, an error will be returned. APIVersion must be called prior to any
 // other functions in the fdb package.
 //
-// Currently, this package supports API versions 200 through 700.
+// Currently, this package supports API versions 200 through 710.
 //
 // Warning: When using the multi-version client API, setting an API version that
 // is not supported by a particular client library will prevent that client from

@@ -116,7 +116,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
 // the API version of your application after upgrading your client until the
 // cluster has also been upgraded.
 func APIVersion(version int) error {
-	headerVersion := 700
+	headerVersion := 710

 	networkMutex.Lock()
 	defer networkMutex.Unlock()

@@ -128,7 +128,7 @@ func APIVersion(version int) error {
 		return errAPIVersionAlreadySet
 	}

-	if version < 200 || version > 700 {
+	if version < 200 || version > 710 {
 		return errAPIVersionNotSupported
 	}

@@ -32,7 +32,7 @@ import (
 func ExampleOpenDefault() {
 	var e error

-	e = fdb.APIVersion(700)
+	e = fdb.APIVersion(710)
 	if e != nil {
 		fmt.Printf("Unable to set API version: %v\n", e)
 		return

@@ -52,7 +52,7 @@ func ExampleOpenDefault() {
 }

 func TestVersionstamp(t *testing.T) {
-	fdb.MustAPIVersion(700)
+	fdb.MustAPIVersion(710)
 	db := fdb.MustOpenDefault()

 	setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {

@@ -98,7 +98,7 @@ func TestVersionstamp(t *testing.T) {
 }

 func ExampleTransactor() {
-	fdb.MustAPIVersion(700)
+	fdb.MustAPIVersion(710)
 	db := fdb.MustOpenDefault()

 	setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {

@@ -149,7 +149,7 @@ func ExampleTransactor() {
 }

 func ExampleReadTransactor() {
-	fdb.MustAPIVersion(700)
+	fdb.MustAPIVersion(710)
 	db := fdb.MustOpenDefault()

 	getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {

@@ -202,7 +202,7 @@ func ExampleReadTransactor() {
 }

 func ExamplePrefixRange() {
-	fdb.MustAPIVersion(700)
+	fdb.MustAPIVersion(710)
 	db := fdb.MustOpenDefault()

 	tr, e := db.CreateTransaction()

@@ -241,7 +241,7 @@ func ExamplePrefixRange() {
 }

 func ExampleRangeIterator() {
-	fdb.MustAPIVersion(700)
+	fdb.MustAPIVersion(710)
 	db := fdb.MustOpenDefault()

 	tr, e := db.CreateTransaction()

@@ -23,7 +23,7 @@
 package fdb

 // #cgo LDFLAGS: -lfdb_c -lm
-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 // #include <string.h>
 //

@@ -22,7 +22,7 @@

 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -22,7 +22,7 @@

 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"
@@ -141,8 +141,6 @@ endif()
 target_include_directories(fdb_java PRIVATE ${JNI_INCLUDE_DIRS})
-# libfdb_java.so is loaded by fdb-java.jar and doesn't need to depened on jvm shared libraries.
-target_link_libraries(fdb_java PRIVATE fdb_c)
 set_target_properties(fdb_java PROPERTIES
   LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib/${SYSTEM_NAME}/amd64/)
 if(APPLE)
   set_target_properties(fdb_java PROPERTIES SUFFIX ".jnilib")
 endif()

@@ -216,8 +214,12 @@ if(NOT OPEN_FOR_IDE)
       set(lib_destination "windows/amd64")
     elseif(APPLE)
       set(lib_destination "osx/x86_64")
     else()
+      if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+        set(lib_destination "linux/aarch64")
+      else()
         set(lib_destination "linux/amd64")
+      endif()
     endif()
     set(lib_destination "${unpack_dir}/lib/${lib_destination}")
     set(jni_package "${CMAKE_BINARY_DIR}/packages/lib")

@@ -19,7 +19,7 @@
 */

 #include <foundationdb/ClientWorkload.h>
-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>

 #include <jni.h>

@@ -375,7 +375,7 @@ struct JVM {
     jmethodID selectMethod =
         env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
     checkException();
-    auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(700));
+    auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(710));
     checkException();
     env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
     checkException();

@@ -21,7 +21,7 @@
 #include <jni.h>
 #include <string.h>

-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710

 #include <foundationdb/fdb_c.h>

@@ -42,7 +42,7 @@ import org.junit.jupiter.api.extension.ExtendWith;
 */
 @ExtendWith(RequiresDatabase.class)
 class DirectoryTest {
-    private static final FDB fdb = FDB.selectAPIVersion(700);
+    private static final FDB fdb = FDB.selectAPIVersion(710);

     @Test
     void testCanCreateDirectory() throws Exception {

@@ -41,7 +41,7 @@ import org.junit.jupiter.api.extension.ExtendWith;
 */
 @ExtendWith(RequiresDatabase.class)
 class RangeQueryIntegrationTest {
-    private static final FDB fdb = FDB.selectAPIVersion(700);
+    private static final FDB fdb = FDB.selectAPIVersion(710);

     @BeforeEach
     @AfterEach

@@ -80,7 +80,7 @@ public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {
      * assume that if we are here, then canRunIntegrationTest() is returning true and we don't have to bother
      * checking it.
      */
-    try (Database db = FDB.selectAPIVersion(700).open()) {
+    try (Database db = FDB.selectAPIVersion(710).open()) {
         db.run(tr -> {
             CompletableFuture<byte[]> future = tr.get("test".getBytes());

@@ -37,7 +37,7 @@ public class FDBLibraryRule implements BeforeAllCallback {

     public FDBLibraryRule(int apiVersion) { this.apiVersion = apiVersion; }

-    public static FDBLibraryRule current() { return new FDBLibraryRule(700); }
+    public static FDBLibraryRule current() { return new FDBLibraryRule(710); }

     public static FDBLibraryRule v63() { return new FDBLibraryRule(630); }

@@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 * This call is required before using any other part of the API. The call allows
 * an error to be thrown at this point to prevent client code from accessing a later library
 * with incorrect assumptions from the current version. The API version documented here is version
- * {@code 700}.<br><br>
+ * {@code 710}.<br><br>
 * FoundationDB encapsulates multiple versions of its interface by requiring
 * the client to explicitly specify the version of the API it uses. The purpose
 * of this design is to allow you to upgrade the server, client libraries, or

@@ -183,8 +183,8 @@ public class FDB {
     }
     if(version < 510)
         throw new IllegalArgumentException("API version not supported (minimum 510)");
-    if(version > 700)
-        throw new IllegalArgumentException("API version not supported (maximum 700)");
+    if(version > 710)
+        throw new IllegalArgumentException("API version not supported (maximum 710)");

     Select_API_version(version);
     singleton = new FDB(version);

@@ -36,11 +36,7 @@ class JNIUtil {
     private static final String TEMPFILE_PREFIX = "fdbjni";
     private static final String TEMPFILE_SUFFIX = ".library";

-    private enum OS {
-        WIN32("windows", "amd64", false),
-        LINUX("linux", "amd64", true),
-        OSX("osx", "x86_64", true);
-
+    private static class OS {
         private final String name;
         private final String arch;
         private final boolean canDeleteEager;

@@ -171,14 +167,20 @@ class JNIUtil {

     private static OS getRunningOS() {
         String osname = System.getProperty("os.name").toLowerCase();
-        if(osname.startsWith("windows"))
-            return OS.WIN32;
-        if(osname.startsWith("linux"))
-            return OS.LINUX;
-        if(osname.startsWith("mac") || osname.startsWith("darwin"))
-            return OS.OSX;
+        String arch = System.getProperty("os.arch");
+        if (!arch.equals("amd64") && !arch.equals("x86_64") && !arch.equals("aarch64")) {
+            throw new IllegalStateException("Unknown or unsupported arch: " + arch);
+        }
+        if (osname.startsWith("windows")) {
+            return new OS("windows", arch, /* canDeleteEager */ false);
+        } else if (osname.startsWith("linux")) {
+            return new OS("linux", arch, /* canDeleteEager */ true);
+        } else if (osname.startsWith("mac") || osname.startsWith("darwin")) {
+            return new OS("osx", arch, /* canDeleteEager */ true);
+        } else {
             throw new IllegalStateException("Unknown or unsupported OS: " + osname);
+        }
     }

     private JNIUtil() {}
 }
@@ -13,7 +13,7 @@ and then added to your classpath.<br>
 <h1>Getting started</h1>
 To start using FoundationDB from Java, create an instance of the
 {@link com.apple.foundationdb.FDB FoundationDB API interface} with the version of the
-API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 700}).
+API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 710}).
 With this API object you can then open {@link com.apple.foundationdb.Cluster Cluster}s and
 {@link com.apple.foundationdb.Database Database}s and start using
 {@link com.apple.foundationdb.Transaction Transaction}s.

@@ -29,7 +29,7 @@ import com.apple.foundationdb.tuple.Tuple;

 public class Example {
     public static void main(String[] args) {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);

         try(Database db = fdb.open()) {
             // Run an operation on the database

@@ -27,7 +27,7 @@ import com.apple.foundationdb.Database;
 import com.apple.foundationdb.FDB;

 public abstract class AbstractTester {
-    public static final int API_VERSION = 700;
+    public static final int API_VERSION = 710;
     protected static final int NUM_RUNS = 25;
     protected static final Charset ASCII = Charset.forName("ASCII");

@@ -33,7 +33,7 @@ public class BlockingBenchmark {
     private static final int PARALLEL = 100;

     public static void main(String[] args) throws InterruptedException {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);

         // The cluster file DOES NOT need to be valid, although it must exist.
         // This is because the database is never really contacted in this test.

@@ -48,7 +48,7 @@ public class ConcurrentGetSetGet {
     }

     public static void main(String[] args) {
-        try(Database database = FDB.selectAPIVersion(700).open()) {
+        try(Database database = FDB.selectAPIVersion(710).open()) {
             new ConcurrentGetSetGet().apply(database);
         }
     }

@@ -26,7 +26,7 @@ import com.apple.foundationdb.tuple.Tuple;

 public class Example {
     public static void main(String[] args) {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);

         try(Database db = fdb.open()) {
             // Run an operation on the database

@@ -31,7 +31,7 @@ public class IterableTest {
     public static void main(String[] args) throws InterruptedException {
         final int reps = 1000;
         try {
-            FDB fdb = FDB.selectAPIVersion(700);
+            FDB fdb = FDB.selectAPIVersion(710);
             try(Database db = fdb.open()) {
                 runTests(reps, db);
             }

@@ -34,7 +34,7 @@ import com.apple.foundationdb.tuple.ByteArrayUtil;
 public class LocalityTests {

     public static void main(String[] args) {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);
         try(Database database = fdb.open(args[0])) {
             try(Transaction tr = database.createTransaction()) {
                 String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();

@@ -43,7 +43,7 @@ public class ParallelRandomScan {
     private static final int PARALLELISM_STEP = 5;

     public static void main(String[] args) throws InterruptedException {
-        FDB api = FDB.selectAPIVersion(700);
+        FDB api = FDB.selectAPIVersion(710);
         try(Database database = api.open(args[0])) {
             for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) {
                 runTest(database, i, ROWS, DURATION_MS);

@@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
 import com.apple.foundationdb.async.AsyncIterable;

 public class RangeTest {
-    private static final int API_VERSION = 700;
+    private static final int API_VERSION = 710;

     public static void main(String[] args) {
         System.out.println("About to use version " + API_VERSION);

@@ -34,7 +34,7 @@ public class SerialInsertion {
     private static final int NODES = 1000000;

     public static void main(String[] args) {
-        FDB api = FDB.selectAPIVersion(700);
+        FDB api = FDB.selectAPIVersion(710);
         try(Database database = api.open()) {
             long start = System.currentTimeMillis();

@@ -39,7 +39,7 @@ public class SerialIteration {
     private static final int THREAD_COUNT = 1;

     public static void main(String[] args) throws InterruptedException {
-        FDB api = FDB.selectAPIVersion(700);
+        FDB api = FDB.selectAPIVersion(710);
         try(Database database = api.open(args[0])) {
             for(int i = 1; i <= THREAD_COUNT; i++) {
                 runThreadedTest(database, i);

@@ -30,7 +30,7 @@ public class SerialTest {
     public static void main(String[] args) throws InterruptedException {
         final int reps = 1000;
         try {
-            FDB fdb = FDB.selectAPIVersion(700);
+            FDB fdb = FDB.selectAPIVersion(710);
             try(Database db = fdb.open()) {
                 runTests(reps, db);
             }

@@ -39,7 +39,7 @@ public class SnapshotTransactionTest {
     private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));

     public static void main(String[] args) {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);
         try(Database db = fdb.open()) {
             snapshotReadShouldNotConflict(db);
             snapshotShouldNotAddConflictRange(db);

@@ -37,7 +37,7 @@ public class TupleTest {
     public static void main(String[] args) throws NoSuchFieldException {
         final int reps = 1000;
         try {
-            FDB fdb = FDB.selectAPIVersion(700);
+            FDB fdb = FDB.selectAPIVersion(710);
             try(Database db = fdb.open()) {
                 runTests(reps, db);
             }

@@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Versionstamp;

 public class VersionstampSmokeTest {
     public static void main(String[] args) {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);
         try(Database db = fdb.open()) {
             db.run(tr -> {
                 tr.clear(Tuple.from("prefix").range());

@@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
 public class WatchTest {

     public static void main(String[] args) {
-        FDB fdb = FDB.selectAPIVersion(700);
+        FDB fdb = FDB.selectAPIVersion(710);
         try(Database database = fdb.open(args[0])) {
             database.options().setLocationCacheSize(42);
             try(Transaction tr = database.createTransaction()) {
@@ -52,7 +52,7 @@ def get_api_version():


 def api_version(ver):
-    header_version = 700
+    header_version = 710

     if '_version' in globals():
         if globals()['_version'] != ver:

@@ -95,7 +95,6 @@ def api_version(ver):
         'transactional',
         'options',
         'StreamingMode',
-        'get_server_protocol'
     )

     _add_symbols(fdb.impl, list)

@@ -253,7 +253,7 @@ def transactional(*tr_args, **tr_kwargs):
     @functools.wraps(func)
     def wrapper(*args, **kwargs):
         # We can't throw this from the decorator, as when a user runs
-        # >>> import fdb ; fdb.api_version(700)
+        # >>> import fdb ; fdb.api_version(710)
         # the code above uses @transactional before the API version is set
         if fdb.get_api_version() >= 630 and inspect.isgeneratorfunction(func):
             raise ValueError("Generators can not be wrapped with fdb.transactional")

@@ -1531,9 +1531,6 @@ def init_c_api():
     _capi.fdb_transaction_get_approximate_size.argtypes = [ctypes.c_void_p]
     _capi.fdb_transaction_get_approximate_size.restype = ctypes.c_void_p

-    _capi.fdb_get_server_protocol.argtypes = [ctypes.c_char_p]
-    _capi.fdb_get_server_protocol.restype = ctypes.c_void_p
-
     _capi.fdb_transaction_get_versionstamp.argtypes = [ctypes.c_void_p]
     _capi.fdb_transaction_get_versionstamp.restype = ctypes.c_void_p

@@ -1733,13 +1730,6 @@ open_databases = {}

 cacheLock = threading.Lock()

-def get_server_protocol(clusterFilePath=None):
-    with _network_thread_reentrant_lock:
-        if not _network_thread:
-            init()
-
-    return FutureUInt64(_capi.fdb_get_server_protocol(optionalParamToBytes(clusterFilePath)[0]))
-
 def open(cluster_file=None, event_model=None):
     """Opens the given database (or the default database of the cluster indicated
     by the fdb.cluster file in a platform-specific location, if no cluster_file

@@ -22,7 +22,7 @@ import fdb
 import sys

 if __name__ == '__main__':
-    fdb.api_version(700)
+    fdb.api_version(710)

 @fdb.transactional
 def setValue(tr, key, value):

@@ -36,7 +36,7 @@ module FDB
     end
   end
   def self.api_version(version)
-    header_version = 700
+    header_version = 710
     if self.is_api_version_selected?()
       if @@chosen_version != version
         raise "FDB API already loaded at version #{@@chosen_version}."
@@ -13,7 +13,7 @@ RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.1
     cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/

 # install boost
-RUN curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_72_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
+RUN curl -L https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
     cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\
     sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_72_0/boost /usr/local/include/ &&\
     rm -rf boost.tar.bz2 boost_1_72_0

@@ -1,7 +1,7 @@
-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>

 int main(int argc, char* argv[]) {
-    fdb_select_api_version(700);
+    fdb_select_api_version(710);
     return 0;
 }

@@ -65,7 +65,7 @@ then
     python setup.py install
     successOr "Installing python bindings failed"
     popd
-    python -c 'import fdb; fdb.api_version(700)'
+    python -c 'import fdb; fdb.api_version(710)'
     successOr "Loading python bindings failed"

     # Test cmake and pkg-config integration: https://github.com/apple/foundationdb/issues/1483

@@ -22,6 +22,8 @@ RUN sed -i -e '/enabled/d' /etc/yum.repos.d/CentOS-Base.repo && \
         curl \
         debbuild \
         devtoolset-8 \
+        devtoolset-8-libasan-devel \
+        devtoolset-8-libtsan-devel \
         devtoolset-8-libubsan-devel \
         devtoolset-8-valgrind-devel \
         dos2unix \

@@ -37,6 +39,7 @@ RUN sed -i -e '/enabled/d' /etc/yum.repos.d/CentOS-Base.repo && \
         lz4-devel \
         lz4-static \
         mono-devel \
+        redhat-lsb-core \
         rpm-build \
         tcl-devel \
         unzip \

@@ -155,7 +158,7 @@ RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocks
     rm -rf /tmp/*

 # install boost 1.67 to /opt
-RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
+RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
    echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
    sha256sum -c boost-sha-67.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \

@@ -164,7 +167,7 @@ RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.

 # install boost 1.72 to /opt
 RUN source /opt/rh/devtoolset-8/enable && \
-    curl -Ls https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
+    curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
    echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
    sha256sum -c boost-sha-72.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \

@@ -76,4 +76,9 @@ RUN rm -f /root/anaconda-ks.cfg && \
     ' j start --tarball $(find ${HOME}/build_output/packages -name correctness\*.tar.gz) "${@}"' \
     '}' \
     '' \
+    'USER_BASHRC="$HOME/src/.bashrc.local"' \
+    'if test -f "$USER_BASHRC"; then' \
+    '    source $USER_BASHRC' \
+    'fi' \
+    '' \
     >> .bashrc

@@ -18,6 +18,8 @@ RUN rpmkeys --import mono-project.com.rpmkey.pgp && \
         curl \
         debbuild \
         devtoolset-8 \
+        devtoolset-8-libasan-devel \
+        devtoolset-8-libtsan-devel \
         devtoolset-8-libubsan-devel \
         devtoolset-8-systemtap-sdt-devel \
         docker-ce \

@@ -34,6 +36,7 @@ RUN rpmkeys --import mono-project.com.rpmkey.pgp && \
         lz4-devel \
         lz4-static \
         mono-devel \
+        redhat-lsb-core \
         rpm-build \
         tcl-devel \
         unzip \

@@ -138,7 +141,7 @@ RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocks
     rm -rf /tmp/*

 # install boost 1.67 to /opt
-RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
+RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
    echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
    sha256sum -c boost-sha-67.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \

@@ -147,7 +150,7 @@ RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.

 # install boost 1.72 to /opt
 RUN source /opt/rh/devtoolset-8/enable && \
-    curl -Ls https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
+    curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
    echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
    sha256sum -c boost-sha-72.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \

@@ -104,5 +104,10 @@ RUN rm -f /root/anaconda-ks.cfg && \
     ' j start --tarball $(find ${HOME}/build_output/packages -name correctness\*.tar.gz) "${@}"' \
     '}' \
     '' \
+    'USER_BASHRC="$HOME/src/.bashrc.local"' \
+    'if test -f "$USER_BASHRC"; then' \
+    '    source $USER_BASHRC' \
+    'fi' \
+    '' \
     'bash ${HOME}/docker_proxy.sh' \
     >> .bashrc
@ -9,24 +9,6 @@ elseif(CPACK_GENERATOR MATCHES "DEB")
|
|||
set(CPACK_COMPONENTS_ALL clients-deb server-deb clients-versioned server-versioned)
|
||||
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
|
||||
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
|
||||
elseif(CPACK_GENERATOR MATCHES "productbuild")
|
||||
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
|
||||
set(CPACK_COMPONENTS_ALL clients-pm server-pm)
|
||||
set(CPACK_STRIP_FILES TRUE)
|
||||
set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
|
||||
set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
|
||||
set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
|
||||
# Commenting out this readme file until it works within packaging
|
||||
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
|
||||
set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
|
||||
# Changing the path of this file as CMAKE_BINARY_DIR does not seem to be defined
|
||||
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
|
||||
if(NOT FDB_RELEASE)
|
||||
set(prerelease_string "-PRERELEASE")
|
||||
else()
|
||||
set(prerelease_string "")
|
||||
endif()
|
||||
set(CPACK_PACKAGE_FILE_NAME "FoundationDB-${PROJECT_VERSION}${prerelease_string}")
|
||||
elseif(CPACK_GENERATOR MATCHES "TGZ")
|
||||
set(CPACK_STRIP_FILES TRUE)
|
||||
set(CPACK_COMPONENTS_ALL clients-tgz server-tgz)
|
||||
|
|
|
@ -38,7 +38,7 @@ function(compile_boost)
   include(ExternalProject)
   set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
   ExternalProject_add("${MY_TARGET}Project"
-    URL "https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2"
+    URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2"
     URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722
     CONFIGURE_COMMAND ./bootstrap.sh ${BOOTSTRAP_ARGS}
     BUILD_COMMAND ${B2_COMMAND} link=static ${MY_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
@ -280,7 +280,12 @@ else()
     -Wno-unknown-attributes)
 endif()
 add_compile_options(
-  -Wall -Wextra
+  -Wall
+  -Wextra
+  -Wredundant-move
+  -Wpessimizing-move
+  -Woverloaded-virtual
+  -Wshift-sign-overflow
   # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 10
   -Wno-comment
   -Wno-dangling-else
@ -288,16 +293,12 @@ else()
   -Wno-format
   -Wno-mismatched-tags
   -Wno-missing-field-initializers
-  -Wno-overloaded-virtual
   -Wno-reorder
+  -Wno-reorder-ctor
   -Wno-sign-compare
-  -Wno-tautological-pointer-compare
   -Wno-undefined-var-template
+  -Wno-tautological-pointer-compare
-  -Wredundant-move
-  -Wpessimizing-move
-  -Woverloaded-virtual
   -Wno-unknown-pragmas
   -Wno-unknown-warning-option
   -Wno-unused-function
@ -214,7 +214,7 @@ endfunction()

 function(fdb_install)
   if(NOT WIN32 AND NOT OPEN_FOR_IDE)
-    set(one_value_options COMPONENT DESTINATION EXPORT DESTINATION_SUFFIX)
+    set(one_value_options COMPONENT DESTINATION EXPORT DESTINATION_SUFFIX RENAME)
     set(multi_value_options TARGETS FILES PROGRAMS DIRECTORY)
     cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")

@ -237,6 +237,9 @@ function(fdb_install)
       get_install_dest(${pkg} ${destination} install_path)
       string(TOLOWER "${pkg}" package)
       if(install_export)
+        if(IN_RENAME)
+          message(FATAL_ERROR "RENAME for EXPORT target not implemented")
+        endif()
         install(
           EXPORT "${IN_EXPORT}-${package}"
           DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
@ -248,6 +251,14 @@ function(fdb_install)
         set(export_args EXPORT "${IN_EXPORT}-${package}")
       endif()
       if(NOT ${install_path} STREQUAL "")
+        if(IN_RENAME)
+          install(
+            ${args}
+            ${export_args}
+            DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
+            COMPONENT "${IN_COMPONENT}-${package}"
+            RENAME ${IN_RENAME})
+        else()
           install(
             ${args}
             ${export_args}
@ -255,6 +266,7 @@ function(fdb_install)
             COMPONENT "${IN_COMPONENT}-${package}")
+        endif()
       endif()
     endif()
   endforeach()
   endif()
 endfunction()

@ -46,10 +46,6 @@ function(install_symlink)
       TO "../${rel_path}bin/${IN_FILE_NAME}"
       DESTINATION "usr/lib64/${IN_LINK_NAME}"
       COMPONENTS "${IN_COMPONENT}-deb")
-    install_symlink_impl(
-      TO "../${rel_path}local/bin/${IN_FILE_NAME}"
-      DESTINATION "usr/lib64/${IN_LINK_NAME}"
-      COMPONENTS "${IN_COMPONENT}-pm")
   elseif("${IN_LINK_DIR}" MATCHES "bin")
     install_symlink_impl(
       TO "../${rel_path}bin/${IN_FILE_NAME}"
@ -61,10 +57,6 @@ function(install_symlink)
       COMPONENTS "${IN_COMPONENT}-el6"
                  "${IN_COMPONENT}-el7"
                  "${IN_COMPONENT}-deb")
-    install_symlink_impl(
-      TO "../${rel_path}/bin/${IN_FILE_NAME}"
-      DESTINATION "usr/local/bin/${IN_LINK_NAME}"
-      COMPONENTS "${IN_COMPONENT}-pm")
   elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor")
     install_symlink_impl(
       TO "../../${rel_path}bin/${IN_FILE_NAME}"
@ -76,10 +68,6 @@ function(install_symlink)
       COMPONENTS "${IN_COMPONENT}-el6"
                  "${IN_COMPONENT}-el7"
                  "${IN_COMPONENT}-deb")
-    install_symlink_impl(
-      TO "../../${rel_path}/bin/${IN_FILE_NAME}"
-      DESTINATION "usr/local/lib/foundationdb/${IN_LINK_NAME}"
-      COMPONENTS "${IN_COMPONENT}-pm")
   else()
     message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}")
   endif()
@ -103,59 +91,16 @@ function(symlink_files)
   endif()
 endfunction()

-# 'map' from (destination, package) to path
-# format vars like install_destination_for_${destination}_${package}
-set(install_destination_for_bin_tgz "bin")
-set(install_destination_for_bin_deb "usr/bin")
-set(install_destination_for_bin_el6 "usr/bin")
-set(install_destination_for_bin_el7 "usr/bin")
-set(install_destination_for_bin_pm "usr/local/bin")
-set(install_destination_for_sbin_tgz "sbin")
-set(install_destination_for_sbin_deb "usr/sbin")
-set(install_destination_for_sbin_el6 "usr/sbin")
-set(install_destination_for_sbin_el7 "usr/sbin")
-set(install_destination_for_sbin_pm "usr/local/libexec")
-set(install_destination_for_lib_tgz "lib")
-set(install_destination_for_lib_deb "usr/lib")
-set(install_destination_for_lib_el6 "usr/lib64")
-set(install_destination_for_lib_el7 "usr/lib64")
-set(install_destination_for_lib_pm "usr/local/lib")
-set(install_destination_for_fdbmonitor_tgz "sbin")
-set(install_destination_for_fdbmonitor_deb "usr/lib/foundationdb")
-set(install_destination_for_fdbmonitor_el6 "usr/lib/foundationdb")
-set(install_destination_for_fdbmonitor_el7 "usr/lib/foundationdb")
-set(install_destination_for_fdbmonitor_pm "usr/local/libexec")
-set(install_destination_for_include_tgz "include")
-set(install_destination_for_include_deb "usr/include")
-set(install_destination_for_include_el6 "usr/include")
-set(install_destination_for_include_el7 "usr/include")
-set(install_destination_for_include_pm "usr/local/include")
-set(install_destination_for_etc_tgz "etc/foundationdb")
-set(install_destination_for_etc_deb "etc/foundationdb")
-set(install_destination_for_etc_el6 "etc/foundationdb")
-set(install_destination_for_etc_el7 "etc/foundationdb")
-set(install_destination_for_etc_pm "usr/local/etc/foundationdb")
-set(install_destination_for_log_tgz "log/foundationdb")
-set(install_destination_for_log_deb "var/log/foundationdb")
-set(install_destination_for_log_el6 "var/log/foundationdb")
-set(install_destination_for_log_el7 "var/log/foundationdb")
-set(install_destination_for_log_pm "usr/local/foundationdb/logs")
-set(install_destination_for_data_tgz "lib/foundationdb")
-set(install_destination_for_data_deb "var/lib/foundationdb/data")
-set(install_destination_for_data_el6 "var/lib/foundationdb/data")
-set(install_destination_for_data_el7 "var/lib/foundationdb/data")
-set(install_destination_for_data_pm "usr/local/foundationdb/data")
-fdb_install_packages(TGZ DEB EL7 PM VERSIONED)
-fdb_install_dirs(BIN SBIN LIB FDBMONITOR INCLUDE ETC LOG DATA)
+fdb_install_packages(TGZ DEB EL7 VERSIONED)
+fdb_install_dirs(BIN SBIN LIB FDBMONITOR INCLUDE ETC LOG DATA BACKUPAGENT)
+message(STATUS "FDB_INSTALL_DIRS -> ${FDB_INSTALL_DIRS}")
+
+# 'map' from (destination, package) to path
+# format vars like install_destination_for_${destination}_${package}
+install_destinations(TGZ
+  BIN bin
+  SBIN sbin
+  LIB lib
+  FDBMONITOR sbin
+  BACKUPAGENT usr/lib/foundationdb
+  INCLUDE include
+  ETC etc/foundationdb
+  LOG log/foundationdb
@ -166,19 +111,13 @@ install_destinations(DEB
   SBIN usr/sbin
   LIB usr/lib
   FDBMONITOR usr/lib/foundationdb
+  BACKUPAGENT usr/lib/foundationdb
   INCLUDE usr/include
   ETC etc/foundationdb
   LOG var/log/foundationdb
-  DATA var/lib/foundationdb)
+  DATA var/lib/foundationdb/data)
 copy_install_destinations(DEB EL7)
 install_destinations(EL7 LIB usr/lib64)
-install_destinations(PM
-  BIN usr/local/bin
-  SBIN usr/local/sbin
-  LIB lib
-  FDBMONITOR usr/local/libexec
-  INCLUDE usr/local/include
-  ETC usr/local/etc/foundationdb)

 # This can be used for debugging in case above is behaving funky
 #print_install_destinations()
@ -186,7 +125,7 @@ install_destinations(PM
 set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")

 if(APPLE)
-  set(CPACK_GENERATOR TGZ productbuild)
+  set(CPACK_GENERATOR TGZ)
 else()
   set(CPACK_GENERATOR RPM DEB TGZ)
 endif()
@ -227,6 +166,13 @@ set(LIB_DIR lib64)
 configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
 configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/prerm" "${script_dir}/clients" @ONLY)

+################################################################################
+# Move Docker Setup
+################################################################################
+
+file(COPY "${PROJECT_SOURCE_DIR}/packaging/docker" DESTINATION "${PROJECT_BINARY_DIR}/packages/")
+
 ################################################################################
 # General CPack configuration
 ################################################################################
@ -249,19 +195,16 @@ set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
 set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7)
 set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
 set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
-set(CPACK_COMPONENT_SERVER-PM_DEPENDS clients-pm)
 set(CPACK_COMPONENT_SERVER-VERSIONED_DEPENDS clients-versioned)

 set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server")
-set(CPACK_COMPONENT_SERVER-PM_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb-server-${PROJECT_VERSION}")

 set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
-set(CPACK_COMPONENT_CLIENTS-PM_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb-clients-${PROJECT_VERSION}")

@ -419,19 +362,6 @@ set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_CONTROL_EXTRA
     ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst
     ${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm)

-################################################################################
-# MacOS configuration
-################################################################################
-
-if(APPLE)
-  install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
-          DESTINATION "usr/local/foundationdb"
-          COMPONENT clients-pm)
-  install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist
-          DESTINATION "Library/LaunchDaemons"
-          COMPONENT server-pm)
-endif()
-
 ################################################################################
 # Configuration for DEB
 ################################################################################
@ -450,9 +380,6 @@ set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
 set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")

 if(NOT WIN32)
-  install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new
-          DESTINATION "usr/local/etc"
-          COMPONENT server-pm)
   fdb_install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
               DESTINATION etc
               COMPONENT server)
@ -717,7 +717,7 @@ namespace SummarizeTest

        delegate IEnumerable<Magnesium.Event> parseDelegate(System.IO.Stream stream, string file,
            bool keepOriginalElement = false, double startTime = -1, double endTime = Double.MaxValue,
-           double samplingFactor = 1.0);
+           double samplingFactor = 1.0, Action<string> nonFatalErrorMessage = null);

        static int Summarize(string[] traceFiles, string summaryFileName,
            string errorFileName, bool? killed, List<string> outputErrors, int? exitCode, long? peakMemory,
@ -750,12 +750,14 @@ namespace SummarizeTest
            {
                try
                {
+                   // Use Action to set this because IEnumerables with yield can't have an out variable
+                   string nonFatalParseError = null;
                    parseDelegate parse;
                    if (traceFileName.EndsWith(".json"))
                        parse = Magnesium.JsonParser.Parse;
                    else
                        parse = Magnesium.XmlParser.Parse;
-                   foreach (var ev in parse(traceFile, traceFileName))
+                   foreach (var ev in parse(traceFile, traceFileName, nonFatalErrorMessage: (x) => { nonFatalParseError = x; }))
                    {
                        Magnesium.Severity newSeverity;
                        if (severityMap.TryGetValue(new KeyValuePair<string, Magnesium.Severity>(ev.Type, ev.Severity), out newSeverity))
@ -876,6 +878,11 @@ namespace SummarizeTest
                        if (ev.Type == "StderrSeverity")
                            stderrSeverity = int.Parse(ev.Details.NewSeverity);
                    }
+                   if (nonFatalParseError != null) {
+                       xout.Add(new XElement("NonFatalParseError",
+                           new XAttribute("Severity", (int)Magnesium.Severity.SevWarnAlways),
+                           new XAttribute("ErrorMessage", nonFatalParseError)));
+                   }

                }
                catch (Exception e)
@ -1,4 +1,4 @@
 /*
 * JsonParser.cs
 *
 * This source file is part of the FoundationDB open source project
@ -34,9 +34,10 @@ namespace Magnesium
    {
        static Random r = new Random();

+       // dummy parameter nonFatalErrorMessage to match the XmlParser signature
        public static IEnumerable<Event> Parse(System.IO.Stream stream, string file,
            bool keepOriginalElement = false, double startTime = -1, double endTime = Double.MaxValue,
-           double samplingFactor = 1.0)
+           double samplingFactor = 1.0, Action<string> nonFatalErrorMessage = null)
        {
            using (var reader = new System.IO.StreamReader(stream))
            {
|
|
@ -33,14 +33,29 @@ namespace Magnesium
|
|||
|
||||
public static IEnumerable<Event> Parse(System.IO.Stream stream, string file,
|
||||
bool keepOriginalElement = false, double startTime = -1, double endTime = Double.MaxValue,
|
||||
double samplingFactor = 1.0)
|
||||
double samplingFactor = 1.0, Action<string> nonFatalErrorMessage = null)
|
||||
{
|
||||
using (var reader = XmlReader.Create(stream))
|
||||
{
|
||||
reader.ReadToDescendant("Trace");
|
||||
reader.Read();
|
||||
foreach (var xev in StreamElements(reader))
|
||||
|
||||
// foreach (var xev in StreamElements(reader))
|
||||
// need to be able to catch and save non-fatal exceptions in StreamElements, so use explicit iterator instead of foreach
|
||||
var iter = StreamElements(reader).GetEnumerator();
|
||||
while (true)
|
||||
{
|
||||
try {
|
||||
if (!iter.MoveNext()) {
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
if (nonFatalErrorMessage != null) {
|
||||
nonFatalErrorMessage(e.Message);
|
||||
}
|
||||
break;
|
||||
}
|
||||
var xev = iter.Current;
|
||||
Event ev = null;
|
||||
try
|
||||
{
|
||||
|
@ -165,29 +180,21 @@ namespace Magnesium
                }
            }

+       // throws exceptions if xml is invalid
        private static IEnumerable<XElement> StreamElements(this XmlReader reader)
        {
            while (!reader.EOF)
            {
                if (reader.NodeType == XmlNodeType.Element)
                {
-                   XElement node = null;
-                   try
-                   {
-                       node = XElement.ReadFrom(reader) as XElement;
-                   }
-                   catch (Exception) { break; }
+                   XElement node = XElement.ReadFrom(reader) as XElement;
                    if (node != null)
                        yield return node;
                }
                else
                {
-                   try
-                   {
                        reader.Read();
-                   }
-                   catch (Exception) { break; }
                }
            }
        }
    }
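The change above exists because a `foreach` cannot catch an exception thrown by the iterator itself and keep the surrounding logic intact: the exception escapes the loop. Pulling from the enumerator by hand lets a malformed trace file end the stream gracefully while recording the error. The same pitfall and the same fix exist in Python; this is an illustrative stand-in for the C# above, not part of the commit:

```python
def parse_events(stream_elements, non_fatal_error_message=None):
    # Pull from the iterator by hand so a failure mid-stream can be
    # recorded as a non-fatal message instead of propagating.
    it = iter(stream_elements)
    while True:
        try:
            node = next(it)
        except StopIteration:
            break
        except Exception as e:  # e.g. malformed trailing XML
            if non_fatal_error_message is not None:
                non_fatal_error_message(str(e))
            break
        yield node
```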
@ -0,0 +1,219 @@
#!/usr/bin/env python3
#
# apiversioner.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import logging
import os
import re
import sys
import traceback


LOG_FORMAT = '%(created)f [%(levelname)s] %(message)s'

EXCLUDED_FILES = list(map(re.compile, [
    # Output directories
    r'\.git/.*', r'bin/.*', r'packages/.*', r'\.objs/.*', r'\.deps/.*', r'bindings/go/build/.*', r'documentation/sphinx/\.out/.*',

    # Generated files
    r'.*\.g\.cpp$', r'.*\.g\.h$', r'(^|.*/)generated.mk$', r'.*\.g\.S$',
    r'.*/MutationType\.java', r'.*/generated\.go',

    # Binary files
    r'.*\.class$', r'.*\.o$', r'.*\.a$', r'.*[\.-]debug', r'.*\.so$', r'.*\.dylib$', r'.*\.dll$', r'.*\.tar[^/]*$', r'.*\.jar$', r'.*pyc$', r'bindings/flow/bin/.*',
    r'.*\.pdf$', r'.*\.jp[e]*g', r'.*\.png', r'.*\.ico',
    r'packaging/msi/art/.*',

    # Project configuration files
    r'.*foundationdb\.VC\.db$', r'.*foundationdb\.VC\.VC\.opendb$', r'.*iml$',

    # Source files from someone else
    r'(^|.*/)Hash3\..*', r'(^|.*/)sqlite.*',
    r'bindings/go/godoc-resources/.*',
    r'bindings/go/src/fdb/tuple/testdata/tuples.golden',
    r'fdbcli/linenoise/.*',
    r'fdbrpc/rapidjson/.*', r'fdbrpc/rapidxml/.*', r'fdbrpc/zlib/.*', r'fdbrpc/sha1/.*',
    r'fdbrpc/xml2json.hpp$', r'fdbrpc/libcoroutine/.*', r'fdbrpc/libeio/.*', r'fdbrpc/lib64/.*',
    r'fdbrpc/generated-constants.cpp$',

    # Miscellaneous
    r'bindings/nodejs/node_modules/.*', r'bindings/go/godoc/.*', r'.*trace.*xml$', r'.*log$', r'.*\.DS_Store$', r'simfdb/\.*', r'.*~$', r'.*.swp$'
]))

# Evaluate eagerly into a list so the phrases can be scanned repeatedly
# (a bare map() object would be exhausted after the first line scanned).
SUSPECT_PHRASES = list(map(re.compile, [
    r'#define\s+FDB_API_VERSION\s+(\d+)',
    r'\.\s*selectApiVersion\s*\(\s*(\d+)\s*\)',
    r'\.\s*APIVersion\s*\(\s*(\d+)\s*\)',
    r'\.\s*MustAPIVersion\s*\(\s*(\d+)\s*\)',
    r'header_version\s+=\s+(\d+)',
    r'\.\s*apiVersion\s*\(\s*(\d+)\s*\)',
    r'API_VERSION\s*=\s*(\d+)',
    r'fdb_select_api_version\s*\((\d+)\)'
]))

DIM_CODE = '\033[2m'
BOLD_CODE = '\033[1m'
RED_COLOR = '\033[91m'
GREEN_COLOR = '\033[92m'
END_COLOR = '\033[0m'


def positive_response(val):
    return val.lower() in {'y', 'yes'}


# Returns: new line list + a dirty flag
def rewrite_lines(lines, version_re, new_version, suspect_only=True, print_diffs=False, ask_confirm=False, grayscale=False):
    new_lines = []
    dirty = False
    new_str = str(new_version)
    regexes = SUSPECT_PHRASES if suspect_only else [version_re]
    group_index = 1 if suspect_only else 2
    for line_no, line in enumerate(lines):
        new_line = line
        offset = 0

        for regex in regexes:
            for m in regex.finditer(line):
                # Replace suspect code with new version.
                start = m.start(group_index)
                end = m.end(group_index)
                new_line = new_line[:start + offset] + new_str + new_line[end + offset:]
                offset += len(new_str) - (end - start)

        if (print_diffs or ask_confirm) and line != new_line:
            print('Rewrite:')
            print('\n'.join(map(lambda pair: ' {:4d}: {}'.format(line_no - 1 + pair[0], pair[1]), enumerate(lines[line_no - 2:line_no]))))
            print((DIM_CODE if grayscale else RED_COLOR) + '-{:4d}: {}'.format(line_no + 1, line) + END_COLOR)
            print((BOLD_CODE if grayscale else GREEN_COLOR) + '+{:4d}: {}'.format(line_no + 1, new_line) + END_COLOR)
            print('\n'.join(map(lambda pair: ' {:4d}: {}'.format(line_no + 2 + pair[0], pair[1]), enumerate(lines[line_no + 1:line_no + 3]))))

            if ask_confirm:
                text = input('Looks good (y/n)? ')
                if not positive_response(text):
                    print('Okay, skipping.')
                    new_line = line

        dirty = dirty or (new_line != line)
        new_lines.append(new_line)

    return new_lines, dirty


def address_file(base_path, file_path, version, new_version=None, suspect_only=False, show_diffs=False,
                 rewrite=False, ask_confirm=True, grayscale=False, paths_only=False):
    if any(map(lambda x: x.match(file_path), EXCLUDED_FILES)):
        logging.debug('skipping file %s as matches excluded list', file_path)
        return True

    # Look for all instances of the version number where it is not part of a larger number
    version_re = re.compile('(^|[^\\d])(' + str(version) + ')([^\\d]|$)')
    try:
        contents = open(os.path.join(base_path, file_path), 'r').read()
        lines = contents.split('\n')
        new_lines = lines
        dirty = False

        if suspect_only:
            # Look for suspect lines (lines that attempt to set a version)
            found = False
            for line_no, line in enumerate(lines):
                for suspect_phrase in SUSPECT_PHRASES:
                    for match in suspect_phrase.finditer(line):
                        curr_version = int(match.groups()[0])
                        if (new_version is None and curr_version < version) or (new_version is not None and curr_version < new_version):
                            found = True
                            logging.info('Old version: %s:%d:%s', file_path, line_no + 1, line)

            if found and new_version is not None and (show_diffs or rewrite):
                new_lines, dirty = rewrite_lines(lines, version_re, new_version, True, print_diffs=True,
                                                 ask_confirm=(rewrite and ask_confirm), grayscale=grayscale)

        else:
            # Evaluate eagerly so the result can be both truth-tested and iterated
            # (a bare filter() object is always truthy and is consumed on first use).
            matching_lines = list(filter(lambda pair: version_re.search(pair[1]), enumerate(lines)))

            # Look for lines with the version
            if matching_lines:
                if paths_only:
                    logging.info('File %s matches', file_path)
                else:
                    for line_no, line in matching_lines:
                        logging.info('Match: %s:%d:%s', file_path, line_no + 1, line)
                    if new_version is not None and (show_diffs or rewrite):
                        new_lines, dirty = rewrite_lines(lines, version_re, new_version, False, print_diffs=True,
                                                         ask_confirm=(rewrite and ask_confirm), grayscale=grayscale)
            else:
                logging.debug('File %s does not match', file_path)

        if dirty and rewrite:
            logging.info('Rewriting %s', os.path.join(base_path, file_path))
            with open(os.path.join(base_path, file_path), 'w') as fout:
                fout.write('\n'.join(new_lines))

        return True
    except (OSError, UnicodeDecodeError):
        logging.exception('Unable to read file %s', os.path.join(base_path, file_path))
        return False


def address_path(path, version, new_version=None, suspect_only=False, show_diffs=False, rewrite=False, ask_confirm=True, grayscale=False, paths_only=False):
    try:
        if os.path.exists(path):
            if os.path.isdir(path):
                status = True
                for dir_path, dir_names, file_names in os.walk(path):
                    for file_name in file_names:
                        file_path = os.path.relpath(os.path.join(dir_path, file_name), path)
                        status = address_file(path, file_path, version, new_version, suspect_only, show_diffs,
                                              rewrite, ask_confirm, grayscale, paths_only) and status
                return status
            else:
                base_name, file_name = os.path.split(path)
                return address_file(base_name, file_name, version, new_version, suspect_only, show_diffs, rewrite, ask_confirm, grayscale, paths_only)
        else:
            logging.error('Path %s does not exist', path)
            return False
    except OSError:
        logging.exception('Unable to find all API versions due to OSError')
        return False


def run(arg_list):
    parser = argparse.ArgumentParser(description='finds and rewrites the API version in FDB source files')
    parser.add_argument('path', help='path to search for FDB source files')
    parser.add_argument('version', type=int, help='current/old version to search for')
    parser.add_argument('--new-version', type=int, default=None, help='new version to update to')
    parser.add_argument('--suspect-only', action='store_true', default=False, help='only look for phrases trying to set the API version')
    parser.add_argument('--show-diffs', action='store_true', default=False, help='show suggested diffs for fixing version')
    parser.add_argument('--rewrite', action='store_true', default=False, help='rewrite offending files')
    parser.add_argument('-y', '--skip-confirm', action='store_true', default=False, help='do not ask for confirmation before rewriting')
    parser.add_argument('--grayscale', action='store_true', default=False,
                        help='print diffs using grayscale output instead of red and green')
    parser.add_argument('--paths-only', action='store_true', default=False, help='display only the path instead of the offending lines')
    args = parser.parse_args(arg_list)
    return address_path(args.path, args.version, args.new_version, args.suspect_only, args.show_diffs,
                        args.rewrite, not args.skip_confirm, args.grayscale, args.paths_only)


if __name__ == '__main__':
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if not run(sys.argv[1:]):
        sys.exit(1)
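A hypothetical invocation of the script above, assuming it is run from the root of a FoundationDB checkout; the version numbers are illustrative:

```python
import logging
from apiversioner import run, LOG_FORMAT

logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
# Report files still pinning API version 700 (report only, no changes):
run(['.', '700', '--suspect-only'])
# Interactively rewrite those version-setting lines to 710:
run(['.', '700', '--new-version', '710', '--suspect-only', '--rewrite'])
```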
@ -0,0 +1,134 @@
#!/usr/bin/env python3

#
# grv_test.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import inspect
import sys

import rate_model
import workload_model
import proxy_model
import ratekeeper_model
from priority import Priority
from plot import Plotter

parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workload', type=str, help='Name of workload to run')
parser.add_argument('-r', '--ratekeeper', type=str, help='Name of ratekeeper model')
parser.add_argument('-d', '--duration', type=int, default=240, help='Duration of simulated test, in seconds. Defaults to 240.')
parser.add_argument('-L', '--limiter', type=str, default='Original', help='Name of limiter implementation. Defaults to \'Original\'.')
parser.add_argument('-p', '--proxy', type=str, default='ProxyModel', help='Name of proxy implementation. Defaults to \'ProxyModel\'.')
parser.add_argument('--list', action='store_true', default=False, help='List options for all models.')
parser.add_argument('--no-graph', action='store_true', default=False, help='Disable graphical output.')

args = parser.parse_args()


def print_choices_list(context=None):
    if context == 'workload' or context is None:
        print('Workloads:')
        for w in workload_model.predefined_workloads.keys():
            print('  %s' % w)

    if context == 'ratekeeper' or context is None:
        print('\nRatekeeper models:')
        for r in ratekeeper_model.predefined_ratekeeper.keys():
            print('  %s' % r)

    proxy_model_classes = [c for c in [getattr(proxy_model, a) for a in dir(proxy_model)] if inspect.isclass(c)]

    if context == 'proxy' or context is None:
        print('\nProxy models:')
        for p in proxy_model_classes:
            if issubclass(p, proxy_model.ProxyModel):
                print('  %s' % p.__name__)

    if context == 'limiter' or context is None:
        print('\nProxy limiters:')
        for p in proxy_model_classes:
            if issubclass(p, proxy_model.Limiter) and p != proxy_model.Limiter:
                name = p.__name__
                if name.endswith('Limiter'):
                    name = name[0:-len('Limiter')]
                print('  %s' % name)


# Handle --list before the required-argument check so the available options
# can be listed without naming a workload and ratekeeper first.
if args.list:
    print_choices_list()
    sys.exit(0)

if args.workload is None or args.ratekeeper is None:
    print('ERROR: A workload (-w/--workload) and ratekeeper model (-r/--ratekeeper) must be specified.\n')
    print_choices_list()
    sys.exit(1)


def validate_class_type(var, name, superclass):
    cls = getattr(var, name, None)
    return cls is not None and inspect.isclass(cls) and issubclass(cls, superclass)


if args.ratekeeper not in ratekeeper_model.predefined_ratekeeper:
    print('Invalid ratekeeper model `%s\'' % args.ratekeeper)
    print_choices_list('ratekeeper')
    sys.exit(1)

if args.workload not in workload_model.predefined_workloads:
    print('Invalid workload model `%s\'' % args.workload)
    print_choices_list('workload')
    sys.exit(1)

if not validate_class_type(proxy_model, args.proxy, proxy_model.ProxyModel):
    print('Invalid proxy model `%s\'' % args.proxy)
    print_choices_list('proxy')
    sys.exit(1)

limiter_name = args.limiter
if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter):
    limiter_name += 'Limiter'
    if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter):
        print('Invalid proxy limiter `%s\'' % args.limiter)
        print_choices_list('limiter')
        sys.exit(1)

ratekeeper = ratekeeper_model.predefined_ratekeeper[args.ratekeeper]
workload = workload_model.predefined_workloads[args.workload]

limiter = getattr(proxy_model, limiter_name)
proxy = getattr(proxy_model, args.proxy)(args.duration, ratekeeper, workload, limiter)

proxy.run()

for priority in workload.priorities():
    latencies = sorted([p for t in proxy.results.latencies[priority].values() for p in t])
    total_started = sum(proxy.results.started[priority].values())
    still_queued = sum([r.count for r in proxy.request_queue if r.priority == priority])

    if len(latencies) > 0:
        print('\n%s: %d requests in %d seconds (rate=%f). %d still queued.' % (priority, total_started, proxy.time, float(total_started)/proxy.time, still_queued))
        print('  Median latency: %f' % latencies[len(latencies)//2])
        print('  90%% latency: %f' % latencies[int(0.9*len(latencies))])
        print('  99%% latency: %f' % latencies[int(0.99*len(latencies))])
        print('  99.9%% latency: %f' % latencies[int(0.999*len(latencies))])
        print('  Max latency: %f' % latencies[-1])

print('')

if not args.no_graph:
    plotter = Plotter(proxy.results)
    plotter.display()
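Example invocations of the simulator above. The ratekeeper name 'default200_batch100' is defined in ratekeeper_model later in this commit; the workload name is hypothetical and must exist in workload_model.predefined_workloads:

```python
import subprocess

# Show available workloads, ratekeepers, proxies, and limiters, then run one
# combination headless (no matplotlib window).
subprocess.run(['python3', 'grv_test.py', '--list'], check=True)
subprocess.run(['python3', 'grv_test.py', '-w', 'default', '-r', 'default200_batch100',
                '-L', 'Smoothing', '--no-graph'], check=True)
```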
@ -0,0 +1,107 @@
#
# plot.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import matplotlib.pyplot as plt


class Plotter:
    def __init__(self, results):
        self.results = results

    @staticmethod
    def add_plot(data, time_resolution, label, use_avg=False):
        # Bucket samples into time_resolution-wide bins, summing (or averaging) values.
        out_data = {}
        counts = {}
        for t in data.keys():
            out_data.setdefault(t//time_resolution*time_resolution, 0)
            counts.setdefault(t//time_resolution*time_resolution, 0)
            out_data[t//time_resolution*time_resolution] += data[t]
            counts[t//time_resolution*time_resolution] += 1

        if use_avg:
            out_data = {t: v/counts[t] for t, v in out_data.items()}

        plt.plot(list(out_data.keys()), list(out_data.values()), label=label)

    @staticmethod
    def add_plot_with_times(data, label):
        plt.plot(list(data.keys()), list(data.values()), label=label)

    def display(self, time_resolution=0.1):
        plt.figure(figsize=(40, 9))
        plt.subplot(3, 3, 1)
        for priority in self.results.started.keys():
            Plotter.add_plot(self.results.started[priority], time_resolution, priority)

        plt.xlabel('Time (s)')
        plt.ylabel('Released/s')
        plt.legend()

        plt.subplot(3, 3, 2)
        for priority in self.results.queued.keys():
            Plotter.add_plot(self.results.queued[priority], time_resolution, priority)

        plt.xlabel('Time (s)')
        plt.ylabel('Requests/s')
        plt.legend()

        plt.subplot(3, 3, 3)
        for priority in self.results.unprocessed_queue_sizes.keys():
            data = {k: max(v) for (k, v) in self.results.unprocessed_queue_sizes[priority].items()}
            Plotter.add_plot(data, time_resolution, priority)

        plt.xlabel('Time (s)')
        plt.ylabel('Max queue size')
        plt.legend()

        num = 4
        for priority in self.results.latencies.keys():
            plt.subplot(3, 3, num)
            median_latencies = {k: v[int(0.5*len(v))] if len(v) > 0 else 0 for (k, v) in self.results.latencies[priority].items()}
            percentile90_latencies = {k: v[int(0.9*len(v))] if len(v) > 0 else 0 for (k, v) in self.results.latencies[priority].items()}
            max_latencies = {k: max(v) if len(v) > 0 else 0 for (k, v) in self.results.latencies[priority].items()}

            Plotter.add_plot(median_latencies, time_resolution, 'median')
            Plotter.add_plot(percentile90_latencies, time_resolution, '90th percentile')
            Plotter.add_plot(max_latencies, time_resolution, 'max')

            plt.xlabel('Time (s)')
            plt.ylabel(str(priority) + ' Latency (s)')
            plt.yscale('log')
            plt.legend()
            num += 1

        for priority in self.results.rate.keys():
            plt.subplot(3, 3, num)
            if len(self.results.rate[priority]) > 0:
                Plotter.add_plot(self.results.rate[priority], time_resolution, 'Rate', use_avg=True)
            if len(self.results.released[priority]) > 0:
                Plotter.add_plot(self.results.released[priority], time_resolution, 'Released', use_avg=True)
            if len(self.results.limit[priority]) > 0:
                Plotter.add_plot(self.results.limit[priority], time_resolution, 'Limit', use_avg=True)
            if len(self.results.limit_and_budget[priority]) > 0:
                Plotter.add_plot(self.results.limit_and_budget[priority], time_resolution, 'Limit and budget', use_avg=True)
            if len(self.results.budget[priority]) > 0:
                Plotter.add_plot(self.results.budget[priority], time_resolution, 'Budget', use_avg=True)

            plt.xlabel('Time (s)')
            plt.ylabel('Value (' + str(priority) + ')')
            plt.legend()
            num += 1

        plt.show()
@ -0,0 +1,40 @@
#
# priority.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import functools


@functools.total_ordering
class Priority:
    def __init__(self, priority_value, label):
        self.priority_value = priority_value
        self.label = label

    def __lt__(self, other):
        return self.priority_value < other.priority_value

    def __str__(self):
        return self.label

    def __repr__(self):
        return repr(self.label)


Priority.SYSTEM = Priority(0, "System")
Priority.DEFAULT = Priority(1, "Default")
Priority.BATCH = Priority(2, "Batch")
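Because Priority defines `__lt__` and is decorated with functools.total_ordering, the full set of comparisons used elsewhere in the model (for example `p <= priority` in proxy_model.py) comes for free. A quick sketch, not part of the commit:

```python
from priority import Priority

# total_ordering derives <=, >, >= from __lt__ (plus default identity equality,
# which is fine here because the three priorities are singletons).
assert Priority.SYSTEM < Priority.DEFAULT < Priority.BATCH
assert sorted([Priority.BATCH, Priority.SYSTEM, Priority.DEFAULT]) == \
    [Priority.SYSTEM, Priority.DEFAULT, Priority.BATCH]
```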
@ -0,0 +1,338 @@
#
# proxy_model.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import functools
import heapq

from priority import Priority
from smoother import Smoother


@functools.total_ordering
class Task:
    def __init__(self, time, fxn):
        self.time = time
        self.fxn = fxn

    def __lt__(self, other):
        return self.time < other.time


class Limiter:
    class UpdateRateParams:
        def __init__(self, time):
            self.time = time

    class UpdateLimitParams:
        def __init__(self, time, elapsed):
            self.time = time
            self.elapsed = elapsed

    class CanStartParams:
        def __init__(self, time, num_started, count):
            self.time = time
            self.num_started = num_started
            self.count = count

    class UpdateBudgetParams:
        def __init__(self, time, num_started, num_started_at_priority, min_priority, last_batch, queue_empty, elapsed):
            self.time = time
            self.num_started = num_started
            self.num_started_at_priority = num_started_at_priority
            self.min_priority = min_priority
            self.last_batch = last_batch
            self.queue_empty = queue_empty
            self.elapsed = elapsed

    def __init__(self, priority, ratekeeper_model, proxy_model):
        self.priority = priority
        self.ratekeeper_model = ratekeeper_model
        self.proxy_model = proxy_model
        self.limit = 0
        self.rate = self.ratekeeper_model.get_limit(0, self.priority)

    def update_rate(self, params):
        pass

    def update_limit(self, params):
        pass

    def can_start(self, params):
        pass

    def update_budget(self, params):
        pass


class OriginalLimiter(Limiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        Limiter.__init__(self, priority, limit_rate_model, proxy_model)

    def update_rate(self, params):
        self.rate = self.ratekeeper_model.get_limit(params.time, self.priority)

    def update_limit(self, params):
        self.limit = min(0, self.limit) + params.elapsed * self.rate
        self.limit = min(self.limit, self.rate * 0.01)
        self.limit = min(self.limit, 100000)

        self.proxy_model.results.rate[self.priority][params.time] = self.rate
        self.proxy_model.results.limit[self.priority][params.time] = self.limit

    def can_start(self, params):
        return params.num_started < self.limit

    def update_budget(self, params):
        self.limit -= params.num_started


class PositiveBudgetLimiter(OriginalLimiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        OriginalLimiter.__init__(self, priority, limit_rate_model, proxy_model)

    def update_limit(self, params):
        self.limit += params.elapsed * self.rate
        self.limit = min(self.limit, 2.0 * self.rate)


class ClampedBudgetLimiter(PositiveBudgetLimiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model)

    def update_budget(self, params):
        min_budget = -self.rate * 5.0
        if self.limit > min_budget:
            self.limit = max(self.limit - params.num_started, min_budget)


class TimeLimiter(PositiveBudgetLimiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model)
        self.locked_until = 0

    def can_start(self, params):
        return params.time >= self.locked_until and PositiveBudgetLimiter.can_start(self, params)

    def update_budget(self, params):
        #print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch))

        if params.min_priority >= self.priority or params.num_started < self.limit:
            self.limit -= params.num_started
        else:
            self.limit = min(self.limit, max(self.limit - params.num_started, -params.last_batch))
            self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + (params.num_started - self.limit)/self.rate)

        #print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority))


class TimePositiveBudgetLimiter(PositiveBudgetLimiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model)
        self.locked_until = 0

    def update_limit(self, params):
        if params.time >= self.locked_until:
            PositiveBudgetLimiter.update_limit(self, params)

    def can_start(self, params):
        return params.num_started + params.count <= self.limit

    def update_budget(self, params):
        #if params.num_started > 0:
        #    print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch))

        if params.num_started > self.limit:
            # Assumed definition: the original referenced 'penalty' without defining
            # it; the overshoot beyond the available budget matches TimeLimiter above.
            penalty = params.num_started - self.limit
            self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + penalty/self.rate)
            self.limit = 0
        else:
            self.limit -= params.num_started

        #if params.num_started > 0:
        #    print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority))


class SmoothingLimiter(OriginalLimiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        OriginalLimiter.__init__(self, priority, limit_rate_model, proxy_model)
        self.smooth_released = Smoother(2)
        self.smooth_rate_limit = Smoother(2)
        self.rate_set = False

    def update_rate(self, params):
        OriginalLimiter.update_rate(self, params)
        if not self.rate_set:
            self.rate_set = True
            self.smooth_rate_limit.reset(self.rate)
        else:
            self.smooth_rate_limit.set_total(params.time, self.rate)

    def update_limit(self, params):
        self.limit = 2.0 * (self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time))

    def can_start(self, params):
        return params.num_started + params.count <= self.limit

    def update_budget(self, params):
        self.smooth_released.add_delta(params.time, params.num_started)


class SmoothingBudgetLimiter(SmoothingLimiter):
    def __init__(self, priority, limit_rate_model, proxy_model):
        SmoothingLimiter.__init__(self, priority, limit_rate_model, proxy_model)
        #self.smooth_filled = Smoother(2)
        self.budget = 0

    def update_limit(self, params):
        release_rate = (self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time))
        #self.smooth_filled.set_total(params.time, 1 if release_rate > 0 else 0)
        self.limit = 2.0 * release_rate

        self.proxy_model.results.rate[self.priority][params.time] = self.smooth_rate_limit.smooth_total(params.time)
        self.proxy_model.results.released[self.priority][params.time] = self.smooth_released.smooth_rate(params.time)
        self.proxy_model.results.limit[self.priority][params.time] = self.limit
        self.proxy_model.results.limit_and_budget[self.priority][params.time] = self.limit + self.budget
        self.proxy_model.results.budget[self.priority][params.time] = self.budget

        #self.budget = max(0, self.budget + params.elapsed * self.smooth_rate_limit.smooth_total(params.time))

        #if self.smooth_filled.smooth_total(params.time) >= 0.1:
        #    self.budget += params.elapsed * self.smooth_rate_limit.smooth_total(params.time)

        #print('Update limit: time=%f, priority=%s, limit=%f, rate=%f, released=%f, budget=%f' % (params.time, self.priority, self.limit, self.smooth_rate_limit.smooth_total(params.time), self.smooth_released.smooth_rate(params.time), self.budget))

    def can_start(self, params):
        return params.num_started + params.count <= self.limit + self.budget  #or params.num_started + params.count <= self.budget

    def update_budget(self, params):
        self.budget = max(0, self.budget + (self.limit - params.num_started_at_priority) / 2 * params.elapsed)

        if params.queue_empty:
            self.budget = min(10, self.budget)

        self.smooth_released.add_delta(params.time, params.num_started_at_priority)


class ProxyModel:
    class Results:
        def __init__(self, priorities, duration):
            self.started = self.init_result(priorities, 0, duration)
            self.queued = self.init_result(priorities, 0, duration)
            self.latencies = self.init_result(priorities, [], duration)
            self.unprocessed_queue_sizes = self.init_result(priorities, [], duration)

            self.rate = {p: {} for p in priorities}
            self.released = {p: {} for p in priorities}
            self.limit = {p: {} for p in priorities}
            self.limit_and_budget = {p: {} for p in priorities}
            self.budget = {p: {} for p in priorities}

        def init_result(self, priorities, starting_value, duration):
            return {p: {s: copy.copy(starting_value) for s in range(0, duration)} for p in priorities}

    def __init__(self, duration, ratekeeper_model, workload_model, Limiter):
        self.time = 0
        self.log_time = 0
        self.duration = duration
        self.priority_limiters = {priority: Limiter(priority, ratekeeper_model, self) for priority in workload_model.priorities()}
        self.workload_model = workload_model
        self.request_scheduled = {p: False for p in self.workload_model.priorities()}

        self.tasks = []
        self.request_queue = []
        self.results = ProxyModel.Results(self.workload_model.priorities(), duration)

    def run(self):
        self.update_rate()
        self.process_requests(self.time)

        for priority in self.workload_model.priorities():
            next_request = self.workload_model.next_request(self.time, priority)
            assert next_request is not None
            heapq.heappush(self.tasks, Task(next_request.time, lambda next_request=next_request: self.receive_request(next_request)))
            self.request_scheduled[priority] = True

        while True:  # or len(self.request_queue) > 0:
            if int(self.time) > self.log_time:
                self.log_time = int(self.time)
                #print(self.log_time)

            task = heapq.heappop(self.tasks)
            self.time = task.time
            if self.time >= self.duration:
                break

            task.fxn()

    def update_rate(self):
        for limiter in self.priority_limiters.values():
            limiter.update_rate(Limiter.UpdateRateParams(self.time))

        heapq.heappush(self.tasks, Task(self.time + 0.01, lambda: self.update_rate()))

    def receive_request(self, request):
        heapq.heappush(self.request_queue, request)

        self.results.queued[request.priority][int(self.time)] += request.count

        next_request = self.workload_model.next_request(self.time, request.priority)
        if next_request is not None and next_request.time < self.duration:
            heapq.heappush(self.tasks, Task(next_request.time, lambda: self.receive_request(next_request)))
        else:
            self.request_scheduled[request.priority] = False

    def process_requests(self, last_time):
        elapsed = self.time - last_time
        for limiter in self.priority_limiters.values():
            limiter.update_limit(Limiter.UpdateLimitParams(self.time, elapsed))

        current_started = 0
        started = {p: 0 for p in self.workload_model.priorities()}

        min_priority = Priority.SYSTEM
        last_batch = 0
        while len(self.request_queue) > 0:
            request = self.request_queue[0]

            if not self.priority_limiters[request.priority].can_start(Limiter.CanStartParams(self.time, current_started, request.count)):
                break

            min_priority = request.priority
            last_batch = request.count

            if self.workload_model.request_completed(request) and not self.request_scheduled[request.priority]:
                next_request = self.workload_model.next_request(self.time, request.priority)
                assert next_request is not None
                heapq.heappush(self.tasks, Task(next_request.time, lambda next_request=next_request: self.receive_request(next_request)))
                self.request_scheduled[request.priority] = True

            current_started += request.count
            started[request.priority] += request.count

            heapq.heappop(self.request_queue)
            self.results.started[request.priority][int(self.time)] += request.count
            self.results.latencies[request.priority][int(self.time)].append(self.time - request.time)

        if len(self.request_queue) == 0:
            min_priority = Priority.BATCH

        for priority, limiter in self.priority_limiters.items():
            started_at_priority = sum([v for p, v in started.items() if p <= priority])
            limiter.update_budget(Limiter.UpdateBudgetParams(self.time, current_started, started_at_priority, min_priority, last_batch, len(self.request_queue) == 0 or self.request_queue[0].priority > priority, elapsed))

        for priority in self.workload_model.priorities():
            self.results.unprocessed_queue_sizes[priority][int(self.time)].append(self.workload_model.workload_models[priority].outstanding)

        current_time = self.time

        delay = 0.001
        heapq.heappush(self.tasks, Task(self.time + delay, lambda: self.process_requests(current_time)))
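For readers skimming the model, here is how the pieces wire together end to end: a ProxyModel drives a heapq-based event loop in which update_rate polls the ratekeeper, receive_request queues incoming work, and process_requests releases it subject to the selected Limiter. The sketch below is illustrative and not part of the commit; StubWorkload and StubRequest are hypothetical stand-ins for the real workload_model.py, shaped to match only the attributes ProxyModel actually touches:

```python
import proxy_model
import ratekeeper_model
from priority import Priority

class StubRequest:
    def __init__(self, time, priority, count=1):
        self.time, self.priority, self.count = time, priority, count
    def __lt__(self, other):  # heapq orders queued requests
        return (self.priority, self.time) < (other.priority, other.time)

class StubModel:
    outstanding = 0  # ProxyModel samples this for queue-size plots

class StubWorkload:
    # Issues one default-priority request every 10 ms.
    def __init__(self):
        self.workload_models = {Priority.DEFAULT: StubModel()}
    def priorities(self):
        return [Priority.DEFAULT]
    def next_request(self, time, priority):
        return StubRequest(time + 0.01, priority)
    def request_completed(self, request):
        return True

rk = ratekeeper_model.predefined_ratekeeper['default200_batch100']
proxy = proxy_model.ProxyModel(10, rk, StubWorkload(), proxy_model.OriginalLimiter)
proxy.run()
print(sum(proxy.results.started[Priority.DEFAULT].values()), 'requests released')
```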
@ -0,0 +1,83 @@
#
# rate_model.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy


class RateModel:
    def __init__(self):
        pass

    def get_rate(self, time):
        pass


class FixedRateModel(RateModel):
    def __init__(self, rate):
        RateModel.__init__(self)
        self.rate = rate

    def get_rate(self, time):
        return self.rate


class UnlimitedRateModel(FixedRateModel):
    def __init__(self):
        self.rate = 1e9


class IntervalRateModel(RateModel):
    def __init__(self, intervals):
        self.intervals = sorted(intervals)

    def get_rate(self, time):
        if len(self.intervals) == 0 or time < self.intervals[0][0]:
            return 0

        target_interval = len(self.intervals)-1
        for i in range(1, len(self.intervals)):
            if time < self.intervals[i][0]:
                target_interval = i-1
                break

        self.intervals = self.intervals[target_interval:]
        return self.intervals[0][1]


class SawtoothRateModel(RateModel):
    def __init__(self, low, high, frequency):
        self.low = low
        self.high = high
        self.frequency = frequency

    def get_rate(self, time):
        if int(2*time/self.frequency) % 2 == 0:
            return self.low
        else:
            return self.high


class DistributionRateModel(RateModel):
    def __init__(self, distribution, frequency):
        self.distribution = distribution
        self.frequency = frequency
        self.last_change = 0
        self.rate = None

    def get_rate(self, time):
        if self.frequency == 0 or int((time - self.last_change) / self.frequency) > int(self.last_change / self.frequency) or self.rate is None:
            self.last_change = time
            self.rate = self.distribution()

        return self.rate
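The rate models are plain time-to-rate functions; the sawtooth, for instance, alternates between its low and high values every half frequency period. A quick check, not part of the commit, with values that follow directly from the definition above:

```python
import rate_model

m = rate_model.SawtoothRateModel(low=10, high=200, frequency=1)
# int(2*t/frequency) is even on [0, 0.5) and odd on [0.5, 1), so:
assert m.get_rate(0.25) == 10
assert m.get_rate(0.75) == 200
assert m.get_rate(1.25) == 10
```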
@ -0,0 +1,67 @@
#
# ratekeeper.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy
import rate_model
from priority import Priority

class RatekeeperModel:
    def __init__(self, limit_models):
        self.limit_models = limit_models

    def get_limit(self, time, priority):
        return self.limit_models[priority].get_rate(time)

predefined_ratekeeper = {}

predefined_ratekeeper['default200_batch100'] = RatekeeperModel(
    {
        Priority.SYSTEM: rate_model.UnlimitedRateModel(),
        Priority.DEFAULT: rate_model.FixedRateModel(200),
        Priority.BATCH: rate_model.FixedRateModel(100)
    })

predefined_ratekeeper['default_sawtooth'] = RatekeeperModel(
    {
        Priority.SYSTEM: rate_model.UnlimitedRateModel(),
        Priority.DEFAULT: rate_model.SawtoothRateModel(10, 200, 1),
        Priority.BATCH: rate_model.FixedRateModel(0)
    })

predefined_ratekeeper['default_uniform_random'] = RatekeeperModel(
    {
        Priority.SYSTEM: rate_model.UnlimitedRateModel(),
        Priority.DEFAULT: rate_model.DistributionRateModel(lambda: numpy.random.uniform(10, 200), 1),
        Priority.BATCH: rate_model.FixedRateModel(0)
    })

predefined_ratekeeper['default_trickle'] = RatekeeperModel(
    {
        Priority.SYSTEM: rate_model.UnlimitedRateModel(),
        Priority.DEFAULT: rate_model.FixedRateModel(3),
        Priority.BATCH: rate_model.FixedRateModel(0)
    })

predefined_ratekeeper['default1000'] = RatekeeperModel(
    {
        Priority.SYSTEM: rate_model.UnlimitedRateModel(),
        Priority.DEFAULT: rate_model.FixedRateModel(1000),
        Priority.BATCH: rate_model.FixedRateModel(500)
    })
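For illustration (not part of the diff), a predefined model is looked up by name and queried per priority:

import ratekeeper
from priority import Priority

model = ratekeeper.predefined_ratekeeper['default200_batch100']
# Each priority has its own rate model; FixedRateModel ignores the time.
assert model.get_limit(0, Priority.DEFAULT) == 200
assert model.get_limit(0, Priority.BATCH) == 100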
@ -0,0 +1,53 @@
#
# smoother.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import math

class Smoother:
    def __init__(self, folding_time):
        self.folding_time = folding_time
        self.reset(0)

    def reset(self, value):
        self.time = 0
        self.total = value
        self.estimate = value

    def set_total(self, time, total):
        self.add_delta(time, total - self.total)

    def add_delta(self, time, delta):
        self.update(time)
        self.total += delta

    def smooth_total(self, time):
        self.update(time)
        return self.estimate

    def smooth_rate(self, time):
        self.update(time)
        return (self.total - self.estimate) / self.folding_time

    def update(self, time):
        elapsed = time - self.time
        if elapsed > 0:
            self.time = time
            self.estimate += (self.total - self.estimate) * (1 - math.exp(-elapsed / self.folding_time))
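A worked example of the smoothing math (illustrative, not part of the diff): the estimate converges exponentially toward the total with time constant folding_time, closing about 63% (1 - 1/e) of the gap per folding time.

import math
from smoother import Smoother   # assumes smoother.py is importable

s = Smoother(folding_time=2.0)
s.add_delta(0, 100)                    # total jumps to 100, estimate stays 0
expected = 100 * (1 - math.exp(-1))    # one folding time later: ~63.2
assert abs(s.smooth_total(2.0) - expected) < 1e-9
# smooth_rate is the remaining gap divided by the folding time: ~18.4
assert abs(s.smooth_rate(2.0) - (100 - expected) / 2.0) < 1e-9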
@ -0,0 +1,201 @@
#
# workload_model.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import functools
import numpy
import math

import rate_model
from priority import Priority

@functools.total_ordering
class Request:
    def __init__(self, time, count, priority):
        self.time = time
        self.count = count
        self.priority = priority

    def __lt__(self, other):
        return self.priority < other.priority

class PriorityWorkloadModel:
    def __init__(self, priority, rate_model, batch_model, generator, max_outstanding=1e9):
        self.priority = priority
        self.rate_model = rate_model
        self.batch_model = batch_model
        self.generator = generator
        self.max_outstanding = max_outstanding
        self.outstanding = 0

    def next_request(self, time):
        if self.outstanding >= self.max_outstanding:
            return None

        batch_size = self.batch_model.next_batch()
        self.outstanding += batch_size
        interval = self.generator.next_request_interval(self.rate_model.get_rate(time))
        return Request(time + interval, batch_size, self.priority)

    def request_completed(self, request):
        was_full = self.max_outstanding <= self.outstanding
        self.outstanding -= request.count

        return was_full and self.outstanding < self.max_outstanding

class WorkloadModel:
    def __init__(self, workload_models):
        self.workload_models = workload_models

    def priorities(self):
        return list(self.workload_models.keys())

    def next_request(self, time, priority):
        return self.workload_models[priority].next_request(time)

    def request_completed(self, request):
        return self.workload_models[request.priority].request_completed(request)

class Distribution:
    EXPONENTIAL = lambda x: numpy.random.exponential(x)
    UNIFORM = lambda x: numpy.random.uniform(0, 2.0 * x)
    FIXED = lambda x: x

class BatchGenerator:
    def __init__(self):
        pass

    def next_batch(self):
        pass

class DistributionBatchGenerator(BatchGenerator):
    def __init__(self, distribution, size):
        BatchGenerator.__init__(self)
        self.distribution = distribution
        self.size = size

    def next_batch(self):
        return math.ceil(self.distribution(self.size))

class RequestGenerator:
    def __init__(self):
        pass

    def next_request_interval(self, rate):
        pass

class DistributionRequestGenerator(RequestGenerator):
    def __init__(self, distribution):
        RequestGenerator.__init__(self)
        self.distribution = distribution

    def next_request_interval(self, rate):
        if rate == 0:
            return 1e9

        return self.distribution(1.0 / rate)

predefined_workloads = {}

predefined_workloads['slow_exponential'] = WorkloadModel(
    {
        Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
            rate_model.FixedRateModel(100),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.EXPONENTIAL),
            max_outstanding=100
        )
    })

predefined_workloads['fixed_uniform'] = WorkloadModel(
    {
        Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
            rate_model.FixedRateModel(0),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=10
        ),
        Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
            rate_model.FixedRateModel(95),
            DistributionBatchGenerator(Distribution.FIXED, 10),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=200
        ),
        Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
            rate_model.FixedRateModel(1),
            DistributionBatchGenerator(Distribution.UNIFORM, 500),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=200
        )
    })

predefined_workloads['batch_starvation'] = WorkloadModel(
    {
        Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
            rate_model.FixedRateModel(1),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=10
        ),
        Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
            rate_model.IntervalRateModel([(0,50), (60,150), (120,90)]),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=200
        ),
        Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
            rate_model.FixedRateModel(100),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=200
        )
    })

predefined_workloads['default_low_high_low'] = WorkloadModel(
    {
        Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
            rate_model.FixedRateModel(0),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=10
        ),
        Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
            rate_model.IntervalRateModel([(0,100), (60,300), (120,100)]),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=200
        ),
        Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
            rate_model.FixedRateModel(0),
            DistributionBatchGenerator(Distribution.FIXED, 1),
            DistributionRequestGenerator(Distribution.UNIFORM),
            max_outstanding=200
        )
    })

for rate in [83, 100, 180, 190, 200]:
    predefined_workloads['default%d' % rate] = WorkloadModel(
        {
            Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
                rate_model.FixedRateModel(rate),
                DistributionBatchGenerator(Distribution.FIXED, 1),
                DistributionRequestGenerator(Distribution.EXPONENTIAL),
                max_outstanding=1000
            )
        })
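A rough sketch of how a simulation loop might drive one of these workloads (illustrative, not part of the diff):

import workload_model
from priority import Priority

workload = workload_model.predefined_workloads['slow_exponential']
# Draw the next DEFAULT-priority request; returns None once
# max_outstanding requests are already in flight.
request = workload.next_request(0.0, Priority.DEFAULT)
if request is not None:
    # request.time is the arrival time drawn from the exponential
    # inter-arrival distribution at 100 requests/second.
    print(request.time, request.count, request.priority)
    # Completing the request frees capacity; returns True if the model
    # transitioned from full back to accepting new requests.
    workload.request_completed(request)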
@ -20,7 +20,7 @@ Consequently, the special-key-space framework wants to integrate all client functionality
If your feature is exposing information to clients and the results are easily formatted as key-value pairs, then you can use special-key-space to implement your client function.

## How
If you choose to use it, you need to implement a function class that inherits from `SpecialKeyRangeReadImpl`, which has an abstract method `Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr)`.
If you choose to use it, you need to implement a function class that inherits from `SpecialKeyRangeReadImpl`, which has an abstract method `Future<RangeResult> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr)`.
This method can be treated as a callback whose implementation details are determined by the developer.
Once you fill out the method, register the function class to the corresponding key range.
Below is a detailed example.

@ -38,10 +38,10 @@ public:
		CountryToCapitalCity[LiteralStringRef("China")] = LiteralStringRef("Beijing");
	}
	// Implement the getRange interface
	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw,
	Future<RangeResult> getRange(ReadYourWritesTransaction* ryw,
	                             KeyRangeRef kr) const override {

		Standalone<RangeResultRef> result;
		RangeResult result;
		for (auto const& country : CountryToCapitalCity) {
			// the registered range here: [\xff\xff/example/, \xff\xff/example/\xff]
			Key keyWithPrefix = country.first.withPrefix(range.begin);

@ -71,7 +71,7 @@ ASSERT(res1.present() && res1.getValue() == LiteralStringRef("Tokyo"));
// getRange
// Note: for getRange(key1, key2), both key1 and key2 should be prefixed with \xff\xff
// something like getRange("normal_key", "\xff\xff/...") is not supported yet
Standalone<RangeResultRef> res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff")));
RangeResult res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff")));
// res2 should contain USA and UK
ASSERT(
	res2.size() == 2 &&
@ -49,7 +49,7 @@ master_doc = 'index'

# General information about the project.
project = u'FoundationDB'
copyright = u'2013-2018 Apple, Inc and the FoundationDB project authors'
copyright = u'2013-2021 Apple, Inc and the FoundationDB project authors'

# Load the version information from 'versions.target'
import xml.etree.ElementTree as ET
@ -799,3 +799,18 @@ Upgrading from Older Versions
-----------------------------

Upgrades from versions older than 5.0.0 are no longer supported.

Version-specific notes on downgrading
=====================================

In general, downgrades between non-patch releases (e.g. from 6.2.x to 6.1.x) are not supported.

.. _downgrade-specific-version:

Downgrading from 6.3.13 - 6.2.33
--------------------------------
After upgrading from 6.2 to 6.3, rolling back and downgrading to 6.2 is still possible, provided that the following conditions are met:

* The 6.3 cluster cannot have ``TLogVersion`` greater than V4 (6.2).
* The 6.3 cluster cannot use storage engine types other than ``ssd-1``, ``ssd-2``, or ``memory``.
* The 6.3 cluster must not have any key servers serialized with tag encoding. This condition can only be guaranteed if the ``TAG_ENCODE_KEY_SERVERS`` knob has never been changed to ``true`` on this cluster.
@ -133,7 +133,7 @@ API versioning

Prior to including ``fdb_c.h``, you must define the ``FDB_API_VERSION`` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::

  #define FDB_API_VERSION 700
  #define FDB_API_VERSION 710
  #include <foundationdb/fdb_c.h>

.. function:: fdb_error_t fdb_select_api_version(int version)
@ -148,7 +148,7 @@
.. |atomic-versionstamps-tuple-warning-value| replace::
    At this time, versionstamped values are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.

.. |api-version| replace:: 700
.. |api-version| replace:: 710

.. |streaming-mode-blurb1| replace::
    When using |get-range-func| and similar interfaces, API clients can request large ranges of the database to iterate over. Making such a request doesn't necessarily mean that the client will consume all of the data in the range - sometimes the client doesn't know how far it intends to iterate in advance. FoundationDB tries to balance latency and bandwidth by requesting data for iteration in batches.
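For example, in the Python binding the batching behavior described in the blurb above can be tuned by passing a streaming mode to a range read (an illustrative sketch, not part of the diff):

import fdb
fdb.api_version(710)
db = fdb.open()

@fdb.transactional
def read_all(tr):
    # want_all fetches the whole range as quickly as possible, trading
    # latency for bandwidth; the default iterator mode batches adaptively.
    return list(tr.get_range(b'a', b'z', streaming_mode=fdb.StreamingMode.want_all))

print(len(read_all(db)))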
@ -108,7 +108,7 @@ Opening a database
After importing the ``fdb`` module and selecting an API version, you probably want to open a :class:`Database` using :func:`open`::

    import fdb
    fdb.api_version(700)
    fdb.api_version(710)
    db = fdb.open()

.. function:: open( cluster_file=None, event_model=None )
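As the signature above suggests, :func:`open` also accepts an explicit cluster file; the path below is illustrative (not part of the diff):

import fdb
fdb.api_version(710)
# Point the client at a specific cluster instead of relying on the
# default cluster-file resolution.
db = fdb.open(cluster_file='/etc/foundationdb/fdb.cluster')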
@ -93,7 +93,7 @@ Opening a database
After requiring the ``FDB`` gem and selecting an API version, you probably want to open a :class:`Database` using :func:`open`::

    require 'fdb'
    FDB.api_version 700
    FDB.api_version 710
    db = FDB.open

.. function:: open( cluster_file=nil ) -> Database
@ -9,6 +9,14 @@ This document provides an overview of changes that an application developer may

For more details about API versions, see :ref:`api-versions`.

.. _api-version-upgrade-guide-710:

API version 710
===============

General
-------

.. _api-version-upgrade-guide-700:

API version 700
@ -244,6 +244,9 @@ The ``start`` subcommand is used to start a backup. If there is already a backu
``-s <DURATION>`` or ``--snapshot_interval <DURATION>``
  Specifies the duration, in seconds, of the inconsistent snapshots written to the backup in continuous mode. The default is 864000, which is 10 days.

``--initial_snapshot_interval <DURATION>``
  Specifies the duration, in seconds, of the first inconsistent snapshot written to the backup. The default is 0, which means as fast as possible.

``--partitioned_log_experimental``
  Specifies that the backup uses the partitioned mutation logs generated by backup workers. As of FDB version 6.3, this option is experimental and requires using fast restore for restoring the database from the generated files. The default is to use non-partitioned mutation logs generated by backup agents.
@ -487,6 +490,9 @@ The ``start`` command will start a new restore on the specified (or default) tag
``--orig_cluster_file <CONNFILE>``
  The cluster file for the original database from which the backup was created. The original database is only needed to convert a ``--timestamp`` argument to a database version.

``--inconsistent_snapshot_only``
  Ignore mutation log files during the restore to speed up the process. Because only range files are restored, in most cases this option yields an inconsistent snapshot, so its use is not recommended.

.. program:: fdbrestore abort

``abort``
@ -29,7 +29,7 @@ Before using the API, we need to specify the API version. This allows programs t

.. code-block:: go

    fdb.MustAPIVersion(700)
    fdb.MustAPIVersion(710)

Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file <default-cluster-file>`.

@ -78,7 +78,7 @@ If this is all working, it looks like we are ready to start building a real appl

    func main() {
        // Different API versions may expose different runtime behaviors.
        fdb.MustAPIVersion(700)
        fdb.MustAPIVersion(710)

        // Open the default database from the system cluster
        db := fdb.MustOpenDefault()

@ -666,7 +666,7 @@ Here's the code for the scheduling tutorial:
    }

    func main() {
        fdb.MustAPIVersion(700)
        fdb.MustAPIVersion(710)
        db := fdb.MustOpenDefault()
        db.Options().SetTransactionTimeout(60000) // 60,000 ms = 1 minute
        db.Options().SetTransactionRetryLimit(100)
@ -30,7 +30,7 @@ Before using the API, we need to specify the API version. This allows programs t

    private static final Database db;

    static {
        fdb = FDB.selectAPIVersion(700);
        fdb = FDB.selectAPIVersion(710);
        db = fdb.open();
    }

@ -66,7 +66,7 @@ If this is all working, it looks like we are ready to start building a real appl

    private static final Database db;

    static {
        fdb = FDB.selectAPIVersion(700);
        fdb = FDB.selectAPIVersion(710);
        db = fdb.open();
    }

@ -441,7 +441,7 @@ Here's the code for the scheduling tutorial:

    private static final Database db;

    static {
        fdb = FDB.selectAPIVersion(700);
        fdb = FDB.selectAPIVersion(710);
        db = fdb.open();
        db.options().setTransactionTimeout(60000); // 60,000 ms = 1 minute
        db.options().setTransactionRetryLimit(100);
@ -23,7 +23,7 @@ Open a Ruby interactive interpreter and import the FoundationDB API module::

Before using the API, we need to specify the API version. This allows programs to maintain compatibility even if the API is modified in future versions::

    > FDB.api_version 700
    > FDB.api_version 710
    => nil

Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file <default-cluster-file>`. ::

@ -46,7 +46,7 @@ If this is all working, it looks like we are ready to start building a real appl

.. code-block:: ruby

    require 'fdb'
    FDB.api_version 700
    FDB.api_version 710
    @db = FDB.open
    @db['hello'] = 'world'
    print 'hello ', @db['hello']

@ -373,7 +373,7 @@ Here's the code for the scheduling tutorial:

    require 'fdb'

    FDB.api_version 700
    FDB.api_version 710

    ####################################
    ##        Initialization          ##