Merge branch 'master' into redwood_queue_improvements

negoyal 2021-05-06 11:32:29 -07:00
commit 84a438b001
293 changed files with 7419 additions and 2101 deletions

View File

@ -18,7 +18,7 @@
# limitations under the License.
cmake_minimum_required(VERSION 3.13)
project(foundationdb
VERSION 7.0.0
VERSION 7.1.0
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
HOMEPAGE_URL "http://www.foundationdb.org/"
LANGUAGES C CXX ASM)
@ -152,6 +152,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
endif()
include(CompileBoost)
include(GetMsgpack)
add_subdirectory(flow)
add_subdirectory(fdbrpc)
add_subdirectory(fdbclient)

View File

@ -171,7 +171,7 @@ that Visual Studio is used to compile.
1. Install Visual Studio 2017 (Community Edition is tested)
1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Download version 1.72 of [Boost](https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2)
1. Download version 1.72 of [Boost](https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2)
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/)
1. (Optional) Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8

View File

@ -26,7 +26,7 @@ sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings',
import util
FDB_API_VERSION = 700
FDB_API_VERSION = 710
LOGGING = {
'version': 1,

View File

@ -157,7 +157,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700] if v >= min_version and v <= max_version])
440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710] if v >= min_version and v <= max_version])
else:
api_version = random.randint(min_version, max_version)

View File

@ -20,7 +20,7 @@
import os
MAX_API_VERSION = 700
MAX_API_VERSION = 710
COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
ALL_TYPES = COMMON_TYPES + ['versionstamp']

View File

@ -34,7 +34,7 @@ fdb.api_version(FDB_API_VERSION)
class ScriptedTest(Test):
TEST_API_VERSION = 700
TEST_API_VERSION = 710
def __init__(self, subspace):
super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)

View File

@ -19,10 +19,11 @@
*/
#include <cstdint>
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#define FDB_INCLUDE_LEGACY_TYPES
#include "fdbclient/MultiVersionTransaction.h"
#include "fdbclient/MultiVersionAssignmentVars.h"
#include "foundationdb/fdb_c.h"
int g_api_version = 0;
@ -364,15 +365,20 @@ extern "C" DLLEXPORT double fdb_database_get_main_thread_busyness(FDBDatabase* d
return DB(d)->getMainThreadBusyness();
}
// Returns the protocol version reported by a quorum of coordinators
// Returns the protocol version reported by the coordinator this client is connected to
// If an expected version is non-zero, the future will not be set until the protocol version differs from the expected one
// Note: the future will never be set if the server is running a protocol from FDB 5.0 or older
extern "C" DLLEXPORT FDBFuture* fdb_database_get_server_protocol(FDBDatabase* db, uint64_t expected_version) {
Optional<ProtocolVersion> expected;
if (expected_version > 0) {
expected = ProtocolVersion(expected_version);
}
return (FDBFuture*)(DB(db)->getServerProtocol(expected).extractPtr());
return (
FDBFuture*)(mapThreadFuture<ProtocolVersion,
uint64_t>(DB(db)->getServerProtocol(expected), [](ErrorOr<ProtocolVersion> result) {
return result.map<uint64_t>([](ProtocolVersion pv) { return pv.versionWithFlags(); });
}).extractPtr());
}
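/* A minimal usage sketch (hedged: assumes a valid FDBDatabase* db and the standard
 * fdb_c future accessors, including fdb_future_get_uint64):
 *   FDBFuture* f = fdb_database_get_server_protocol(db, 0); // 0 = no expected version
 *   fdb_error_t err = fdb_future_block_until_ready(f);
 *   uint64_t protocol = 0;
 *   if (!err) err = fdb_future_get_uint64(f, &protocol);
 *   fdb_future_destroy(f);
 */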
extern "C" DLLEXPORT void fdb_transaction_destroy(FDBTransaction* tr) {

View File

@ -27,10 +27,10 @@
#endif
#if !defined(FDB_API_VERSION)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 700)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 710)
#elif FDB_API_VERSION < 13
#error API version no longer supported (upgrade to 13)
#elif FDB_API_VERSION > 700
#elif FDB_API_VERSION > 710
#error Requested API version requires a newer version of this header
#endif
@ -97,7 +97,7 @@ typedef struct key {
const uint8_t* key;
int key_length;
} FDBKey;
#if FDB_API_VERSION >= 700
#if FDB_API_VERSION >= 710
typedef struct keyvalue {
const uint8_t* key;
int key_length;

View File

@ -1,9 +1,9 @@
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
int main(int argc, char* argv[]) {
(void)argc;
(void)argv;
fdb_select_api_version(700);
fdb_select_api_version(710);
return 0;
}

View File

@ -1172,6 +1172,14 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
#endif
}
/* Set client Log group */
if (strlen(args->log_group) != 0) {
err = fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group));
if (err) {
fprintf(stderr, "ERROR: fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP): %s\n", fdb_get_error(err));
}
}
/* enable tracing if specified */
if (args->trace) {
fprintf(debugme,
@ -1345,6 +1353,7 @@ int init_args(mako_args_t* args) {
args->verbose = 1;
args->flatbuffers = 0; /* internal */
args->knobs[0] = '\0';
args->log_group[0] = '\0';
args->trace = 0;
args->tracepath[0] = '\0';
args->traceformat = 0; /* default to client's default (XML) */
@ -1505,6 +1514,7 @@ void usage() {
printf("%-24s %s\n", "-m, --mode=MODE", "Specify the mode (build, run, clean)");
printf("%-24s %s\n", "-z, --zipf", "Use zipfian distribution instead of uniform distribution");
printf("%-24s %s\n", " --commitget", "Commit GETs");
printf("%-24s %s\n", " --loggroup=LOGGROUP", "Set client log group");
printf("%-24s %s\n", " --trace", "Enable tracing");
printf("%-24s %s\n", " --tracepath=PATH", "Set trace file path");
printf("%-24s %s\n", " --trace_format <xml|json>", "Set trace format (Default: json)");
@ -1546,6 +1556,7 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
{ "verbose", required_argument, NULL, 'v' },
{ "mode", required_argument, NULL, 'm' },
{ "knobs", required_argument, NULL, ARG_KNOBS },
{ "loggroup", required_argument, NULL, ARG_LOGGROUP },
{ "tracepath", required_argument, NULL, ARG_TRACEPATH },
{ "trace_format", required_argument, NULL, ARG_TRACEFORMAT },
{ "streaming", required_argument, NULL, ARG_STREAMING_MODE },
@ -1656,6 +1667,9 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
case ARG_KNOBS:
memcpy(args->knobs, optarg, strlen(optarg) + 1);
break;
case ARG_LOGGROUP:
memcpy(args->log_group, optarg, strlen(optarg) + 1);
break;
case ARG_TRACE:
args->trace = 1;
break;
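/* Hedged usage sketch for the new option (values are hypothetical; the remaining
 * flags are documented in usage() above):
 *   mako -m run --loggroup=perf_team --trace --tracepath=/tmp
 */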

View File

@ -3,7 +3,7 @@
#pragma once
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#endif
#include <foundationdb/fdb_c.h>
@ -68,6 +68,7 @@ enum Arguments {
ARG_VERSION,
ARG_KNOBS,
ARG_FLATBUFFERS,
ARG_LOGGROUP,
ARG_TRACE,
ARG_TRACEPATH,
ARG_TRACEFORMAT,
@ -97,6 +98,7 @@ typedef struct {
int ops[MAX_OP][3];
} mako_txnspec_t;
#define LOGGROUP_MAX 256
#define KNOB_MAX 256
#define TAGPREFIXLENGTH_MAX 8
@ -122,6 +124,7 @@ typedef struct {
int verbose;
mako_txnspec_t txnspec;
char cluster_file[PATH_MAX];
char log_group[LOGGROUP_MAX];
int trace;
char tracepath[PATH_MAX];
int traceformat; /* 0 - XML, 1 - JSON */

View File

@ -641,7 +641,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(700), "select API version", rs);
checkError(fdb_select_api_version(710), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
valueStr = (uint8_t*)malloc((sizeof(uint8_t)) * valueSize);

View File

@ -285,7 +285,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(700), "select API version", rs);
checkError(fdb_select_api_version(710), "select API version", rs);
printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, keySize);

View File

@ -29,7 +29,7 @@
#include <inttypes.h>
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#endif
#include <foundationdb/fdb_c.h>

View File

@ -97,7 +97,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(700), "select API version", rs);
checkError(fdb_select_api_version(710), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, KEY_SIZE);

View File

@ -39,7 +39,7 @@
#pragma once
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <string>

View File

@ -20,7 +20,7 @@
// Unit tests for API setup, network initialization functions from the FDB C API.
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <iostream>
#include <thread>
@ -42,13 +42,13 @@ TEST_CASE("setup") {
CHECK(err);
// Select current API version
fdb_check(fdb_select_api_version(700));
fdb_check(fdb_select_api_version(710));
// Error to call again after a successful return
err = fdb_select_api_version(700);
err = fdb_select_api_version(710);
CHECK(err);
CHECK(fdb_get_max_api_version() >= 700);
CHECK(fdb_get_max_api_version() >= 710);
fdb_check(fdb_setup_network());
// Calling a second time should fail

View File

@ -20,7 +20,7 @@
// Unit tests for the FoundationDB C API.
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <assert.h>
#include <string.h>
@ -2151,7 +2151,7 @@ int main(int argc, char** argv) {
<< "Usage: fdb_c_unit_tests /path/to/cluster_file key_prefix [externalClient]" << std::endl;
return 1;
}
fdb_check(fdb_select_api_version(700));
fdb_check(fdb_select_api_version(710));
if (argc == 4) {
std::string externalClientLibrary = argv[3];
fdb_check(fdb_network_set_option(

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include "foundationdb/fdb_c.h"
#undef DLLEXPORT
#include "workloads.h"
@ -266,7 +266,7 @@ struct SimpleWorkload : FDBWorkload {
insertsPerTx = context->getOption("insertsPerTx", 100ul);
opsPerTx = context->getOption("opsPerTx", 100ul);
runFor = context->getOption("runFor", 10.0);
auto err = fdb_select_api_version(700);
auto err = fdb_select_api_version(710);
if (err) {
context->trace(
FDBSeverity::Info, "SelectAPIVersionFailed", { { "Error", std::string(fdb_get_error(err)) } });

View File

@ -37,7 +37,7 @@ THREAD_FUNC networkThread(void* fdb) {
}
ACTOR Future<Void> _test() {
API* fdb = FDB::API::selectAPIVersion(700);
API* fdb = FDB::API::selectAPIVersion(710);
auto db = fdb->createDatabase();
state Reference<Transaction> tr = db->createTransaction();
@ -81,7 +81,7 @@ ACTOR Future<Void> _test() {
}
void fdb_flow_test() {
API* fdb = FDB::API::selectAPIVersion(700);
API* fdb = FDB::API::selectAPIVersion(710);
fdb->setupNetwork();
startThread(networkThread, fdb);

View File

@ -23,7 +23,7 @@
#include <flow/flow.h>
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <bindings/c/foundationdb/fdb_c.h>
#undef DLLEXPORT

View File

@ -1863,7 +1863,7 @@ ACTOR void _test_versionstamp() {
try {
g_network = newNet2(TLSConfig());
API* fdb = FDB::API::selectAPIVersion(700);
API* fdb = FDB::API::selectAPIVersion(710);
fdb->setupNetwork();
startThread(networkThread, fdb);

View File

@ -9,7 +9,7 @@ This package requires:
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-700.
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-710.
To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater):

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below:
func main() {
// Different API versions may expose different runtime behaviors.
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
// Open the default database from the system cluster
db := fdb.MustOpenDefault()

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
// #include <stdlib.h>
import "C"
@ -108,7 +108,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// library, an error will be returned. APIVersion must be called prior to any
// other functions in the fdb package.
//
// Currently, this package supports API versions 200 through 700.
// Currently, this package supports API versions 200 through 710.
//
// Warning: When using the multi-version client API, setting an API version that
// is not supported by a particular client library will prevent that client from
@ -116,7 +116,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// the API version of your application after upgrading your client until the
// cluster has also been upgraded.
func APIVersion(version int) error {
headerVersion := 700
headerVersion := 710
networkMutex.Lock()
defer networkMutex.Unlock()
@ -128,7 +128,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 700 {
if version < 200 || version > 710 {
return errAPIVersionNotSupported
}

View File

@ -32,7 +32,7 @@ import (
func ExampleOpenDefault() {
var e error
e = fdb.APIVersion(700)
e = fdb.APIVersion(710)
if e != nil {
fmt.Printf("Unable to set API version: %v\n", e)
return
@ -52,7 +52,7 @@ func ExampleOpenDefault() {
}
func TestVersionstamp(t *testing.T) {
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
db := fdb.MustOpenDefault()
setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
@ -98,7 +98,7 @@ func TestVersionstamp(t *testing.T) {
}
func ExampleTransactor() {
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
db := fdb.MustOpenDefault()
setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {
@ -149,7 +149,7 @@ func ExampleTransactor() {
}
func ExampleReadTransactor() {
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
db := fdb.MustOpenDefault()
getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
@ -202,7 +202,7 @@ func ExampleReadTransactor() {
}
func ExamplePrefixRange() {
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()
@ -241,7 +241,7 @@ func ExamplePrefixRange() {
}
func ExampleRangeIterator() {
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()

View File

@ -23,7 +23,7 @@
package fdb
// #cgo LDFLAGS: -lfdb_c -lm
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
// #include <string.h>
//

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 700
// #define FDB_API_VERSION 710
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -19,7 +19,7 @@
*/
#include <foundationdb/ClientWorkload.h>
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <jni.h>
@ -375,7 +375,7 @@ struct JVM {
jmethodID selectMethod =
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
checkException();
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(700));
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(710));
checkException();
env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
checkException();

View File

@ -21,7 +21,7 @@
#include <jni.h>
#include <string.h>
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

View File

@ -42,7 +42,7 @@ import org.junit.jupiter.api.extension.ExtendWith;
*/
@ExtendWith(RequiresDatabase.class)
class DirectoryTest {
private static final FDB fdb = FDB.selectAPIVersion(700);
private static final FDB fdb = FDB.selectAPIVersion(710);
@Test
void testCanCreateDirectory() throws Exception {

View File

@ -41,7 +41,7 @@ import org.junit.jupiter.api.extension.ExtendWith;
*/
@ExtendWith(RequiresDatabase.class)
class RangeQueryIntegrationTest {
private static final FDB fdb = FDB.selectAPIVersion(700);
private static final FDB fdb = FDB.selectAPIVersion(710);
@BeforeEach
@AfterEach

View File

@ -80,7 +80,7 @@ public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {
* assume that if we are here, then canRunIntegrationTest() is returning true and we don't have to bother
* checking it.
*/
try (Database db = FDB.selectAPIVersion(700).open()) {
try (Database db = FDB.selectAPIVersion(710).open()) {
db.run(tr -> {
CompletableFuture<byte[]> future = tr.get("test".getBytes());

View File

@ -37,7 +37,7 @@ public class FDBLibraryRule implements BeforeAllCallback {
public FDBLibraryRule(int apiVersion) { this.apiVersion = apiVersion; }
public static FDBLibraryRule current() { return new FDBLibraryRule(700); }
public static FDBLibraryRule current() { return new FDBLibraryRule(710); }
public static FDBLibraryRule v63() { return new FDBLibraryRule(630); }

View File

@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger;
* This call is required before using any other part of the API. The call allows
* an error to be thrown at this point to prevent client code from accessing a later library
* with incorrect assumptions from the current version. The API version documented here is version
* {@code 700}.<br><br>
* {@code 710}.<br><br>
* FoundationDB encapsulates multiple versions of its interface by requiring
* the client to explicitly specify the version of the API it uses. The purpose
* of this design is to allow you to upgrade the server, client libraries, or
@ -183,8 +183,8 @@ public class FDB {
}
if(version < 510)
throw new IllegalArgumentException("API version not supported (minimum 510)");
if(version > 700)
throw new IllegalArgumentException("API version not supported (maximum 700)");
if(version > 710)
throw new IllegalArgumentException("API version not supported (maximum 710)");
Select_API_version(version);
singleton = new FDB(version);

View File

@ -13,7 +13,7 @@ and then added to your classpath.<br>
<h1>Getting started</h1>
To start using FoundationDB from Java, create an instance of the
{@link com.apple.foundationdb.FDB FoundationDB API interface} with the version of the
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 700}).
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 710}).
With this API object you can then open {@link com.apple.foundationdb.Cluster Cluster}s and
{@link com.apple.foundationdb.Database Database}s and start using
{@link com.apple.foundationdb.Transaction Transaction}s.
@ -29,7 +29,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -27,7 +27,7 @@ import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
public abstract class AbstractTester {
public static final int API_VERSION = 700;
public static final int API_VERSION = 710;
protected static final int NUM_RUNS = 25;
protected static final Charset ASCII = Charset.forName("ASCII");

View File

@ -33,7 +33,7 @@ public class BlockingBenchmark {
private static final int PARALLEL = 100;
public static void main(String[] args) throws InterruptedException {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
// The cluster file DOES NOT need to be valid, although it must exist.
// This is because the database is never really contacted in this test.

View File

@ -48,7 +48,7 @@ public class ConcurrentGetSetGet {
}
public static void main(String[] args) {
try(Database database = FDB.selectAPIVersion(700).open()) {
try(Database database = FDB.selectAPIVersion(710).open()) {
new ConcurrentGetSetGet().apply(database);
}
}

View File

@ -26,7 +26,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -31,7 +31,7 @@ public class IterableTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.tuple.ByteArrayUtil;
public class LocalityTests {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database database = fdb.open(args[0])) {
try(Transaction tr = database.createTransaction()) {
String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();

View File

@ -43,7 +43,7 @@ public class ParallelRandomScan {
private static final int PARALLELISM_STEP = 5;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(700);
FDB api = FDB.selectAPIVersion(710);
try(Database database = api.open(args[0])) {
for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) {
runTest(database, i, ROWS, DURATION_MS);

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterable;
public class RangeTest {
private static final int API_VERSION = 700;
private static final int API_VERSION = 710;
public static void main(String[] args) {
System.out.println("About to use version " + API_VERSION);

View File

@ -34,7 +34,7 @@ public class SerialInsertion {
private static final int NODES = 1000000;
public static void main(String[] args) {
FDB api = FDB.selectAPIVersion(700);
FDB api = FDB.selectAPIVersion(710);
try(Database database = api.open()) {
long start = System.currentTimeMillis();

View File

@ -39,7 +39,7 @@ public class SerialIteration {
private static final int THREAD_COUNT = 1;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(700);
FDB api = FDB.selectAPIVersion(710);
try(Database database = api.open(args[0])) {
for(int i = 1; i <= THREAD_COUNT; i++) {
runThreadedTest(database, i);

View File

@ -30,7 +30,7 @@ public class SerialTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -39,7 +39,7 @@ public class SnapshotTransactionTest {
private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
snapshotReadShouldNotConflict(db);
snapshotShouldNotAddConflictRange(db);

View File

@ -37,7 +37,7 @@ public class TupleTest {
public static void main(String[] args) throws NoSuchFieldException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Versionstamp;
public class VersionstampSmokeTest {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database db = fdb.open()) {
db.run(tr -> {
tr.clear(Tuple.from("prefix").range());

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
public class WatchTest {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(700);
FDB fdb = FDB.selectAPIVersion(710);
try(Database database = fdb.open(args[0])) {
database.options().setLocationCacheSize(42);
try(Transaction tr = database.createTransaction()) {

View File

@ -52,7 +52,7 @@ def get_api_version():
def api_version(ver):
header_version = 700
header_version = 710
if '_version' in globals():
if globals()['_version'] != ver:

View File

@ -253,7 +253,7 @@ def transactional(*tr_args, **tr_kwargs):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# We can't throw this from the decorator, as when a user runs
# >>> import fdb ; fdb.api_version(700)
# >>> import fdb ; fdb.api_version(710)
# the code above uses @transactional before the API version is set
if fdb.get_api_version() >= 630 and inspect.isgeneratorfunction(func):
raise ValueError("Generators can not be wrapped with fdb.transactional")

View File

@ -22,7 +22,7 @@ import fdb
import sys
if __name__ == '__main__':
fdb.api_version(700)
fdb.api_version(710)
@fdb.transactional
def setValue(tr, key, value):

View File

@ -36,7 +36,7 @@ module FDB
end
end
def self.api_version(version)
header_version = 700
header_version = 710
if self.is_api_version_selected?()
if @@chosen_version != version
raise "FDB API already loaded at version #{@@chosen_version}."

View File

@ -13,7 +13,7 @@ RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.1
cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/
# install boost
RUN curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
RUN curl -L https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba  boost.tar.bz2" > boost-sha.txt &&\
sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_67_0/boost /usr/local/include/ &&\
rm -rf boost.tar.bz2 boost_1_67_0

View File

@ -1,7 +1,7 @@
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
int main(int argc, char* argv[]) {
fdb_select_api_version(700);
fdb_select_api_version(710);
return 0;
}

View File

@ -65,7 +65,7 @@ then
python setup.py install
successOr "Installing python bindings failed"
popd
python -c 'import fdb; fdb.api_version(700)'
python -c 'import fdb; fdb.api_version(710)'
successOr "Loading python bindings failed"
# Test cmake and pkg-config integration: https://github.com/apple/foundationdb/issues/1483

View File

@ -156,7 +156,7 @@ RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocks
rm -rf /tmp/*
# install boost 1.67 to /opt
RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
sha256sum -c boost-sha-67.txt && \
tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \
@ -165,7 +165,7 @@ RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.
# install boost 1.72 to /opt
RUN source /opt/rh/devtoolset-8/enable && \
curl -Ls https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
sha256sum -c boost-sha-72.txt && \
tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \

View File

@ -139,7 +139,7 @@ RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocks
rm -rf /tmp/*
# install boost 1.67 to /opt
RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
sha256sum -c boost-sha-67.txt && \
tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \
@ -148,7 +148,7 @@ RUN curl -Ls https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.
# install boost 1.72 to /opt
RUN source /opt/rh/devtoolset-8/enable && \
curl -Ls https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
sha256sum -c boost-sha-72.txt && \
tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \

View File

@ -38,7 +38,7 @@ function(compile_boost)
include(ExternalProject)
set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
ExternalProject_add("${MY_TARGET}Project"
URL "https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2"
URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2"
URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722
CONFIGURE_COMMAND ./bootstrap.sh ${BOOTSTRAP_ARGS}
BUILD_COMMAND ${B2_COMMAND} link=static ${MY_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install

20
cmake/GetMsgpack.cmake Normal file
View File

@ -0,0 +1,20 @@
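# Prefer an installed msgpack 3.3.0; otherwise download the header-only sources and expose them through the msgpack INTERFACE target.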
find_package(msgpack 3.3.0 EXACT QUIET CONFIG)
add_library(msgpack INTERFACE)
if(msgpack_FOUND)
target_link_libraries(msgpack INTERFACE msgpackc-cxx)
else()
include(ExternalProject)
ExternalProject_add(msgpackProject
URL "https://github.com/msgpack/msgpack-c/releases/download/cpp-3.3.0/msgpack-3.3.0.tar.gz"
URL_HASH SHA256=6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
)
ExternalProject_Get_property(msgpackProject SOURCE_DIR)
target_include_directories(msgpack SYSTEM INTERFACE "${SOURCE_DIR}/include")
add_dependencies(msgpack msgpackProject)
endif()

View File

@ -103,54 +103,10 @@ function(symlink_files)
endif()
endfunction()
# 'map' from (destination, package) to path
# format vars like install_destination_for_${destination}_${package}
set(install_destination_for_bin_tgz "bin")
set(install_destination_for_bin_deb "usr/bin")
set(install_destination_for_bin_el6 "usr/bin")
set(install_destination_for_bin_el7 "usr/bin")
set(install_destination_for_bin_pm "usr/local/bin")
set(install_destination_for_sbin_tgz "sbin")
set(install_destination_for_sbin_deb "usr/sbin")
set(install_destination_for_sbin_el6 "usr/sbin")
set(install_destination_for_sbin_el7 "usr/sbin")
set(install_destination_for_sbin_pm "usr/local/libexec")
set(install_destination_for_lib_tgz "lib")
set(install_destination_for_lib_deb "usr/lib")
set(install_destination_for_lib_el6 "usr/lib64")
set(install_destination_for_lib_el7 "usr/lib64")
set(install_destination_for_lib_pm "usr/local/lib")
set(install_destination_for_fdbmonitor_tgz "sbin")
set(install_destination_for_fdbmonitor_deb "usr/lib/foundationdb")
set(install_destination_for_fdbmonitor_el6 "usr/lib/foundationdb")
set(install_destination_for_fdbmonitor_el7 "usr/lib/foundationdb")
set(install_destination_for_fdbmonitor_pm "usr/local/libexec")
set(install_destination_for_include_tgz "include")
set(install_destination_for_include_deb "usr/include")
set(install_destination_for_include_el6 "usr/include")
set(install_destination_for_include_el7 "usr/include")
set(install_destination_for_include_pm "usr/local/include")
set(install_destination_for_etc_tgz "etc/foundationdb")
set(install_destination_for_etc_deb "etc/foundationdb")
set(install_destination_for_etc_el6 "etc/foundationdb")
set(install_destination_for_etc_el7 "etc/foundationdb")
set(install_destination_for_etc_pm "usr/local/etc/foundationdb")
set(install_destination_for_log_tgz "log/foundationdb")
set(install_destination_for_log_deb "var/log/foundationdb")
set(install_destination_for_log_el6 "var/log/foundationdb")
set(install_destination_for_log_el7 "var/log/foundationdb")
set(install_destination_for_log_pm "usr/local/foundationdb/logs")
set(install_destination_for_data_tgz "lib/foundationdb")
set(install_destination_for_data_deb "var/lib/foundationdb/data")
set(install_destination_for_data_el6 "var/lib/foundationdb/data")
set(install_destination_for_data_el7 "var/lib/foundationdb/data")
set(install_destination_for_data_pm "usr/local/foundationdb/data")
fdb_install_packages(TGZ DEB EL7 PM VERSIONED)
fdb_install_dirs(BIN SBIN LIB FDBMONITOR INCLUDE ETC LOG DATA)
message(STATUS "FDB_INSTALL_DIRS -> ${FDB_INSTALL_DIRS}")
# 'map' from (destination, package) to path
# format vars like install_destination_for_${destination}_${package}
install_destinations(TGZ
BIN bin
SBIN sbin
@ -169,7 +125,7 @@ install_destinations(DEB
INCLUDE usr/include
ETC etc/foundationdb
LOG var/log/foundationdb
DATA var/lib/foundationdb)
DATA var/lib/foundationdb/data)
copy_install_destinations(DEB EL7)
install_destinations(EL7 LIB usr/lib64)
install_destinations(PM
@ -227,6 +183,13 @@ set(LIB_DIR lib64)
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/prerm" "${script_dir}/clients" @ONLY)
################################################################################
# Move Docker Setup
################################################################################
file(COPY "${PROJECT_SOURCE_DIR}/packaging/docker" DESTINATION "${PROJECT_BINARY_DIR}/packages/")
################################################################################
# General CPack configuration
################################################################################

View File

@ -0,0 +1,134 @@
#!/usr/bin/env python3
#
# grv_test.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import inspect
import sys
import rate_model
import workload_model
import proxy_model
import ratekeeper_model
from priority import Priority
from plot import Plotter
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workload', type=str, help='Name of workload to run')
parser.add_argument('-r', '--ratekeeper', type=str, help='Name of ratekeeper model')
parser.add_argument('-d', '--duration', type=int, default=240, help='Duration of simulated test, in seconds. Defaults to 240.')
parser.add_argument('-L', '--limiter', type=str, default='Original', help='Name of limiter implementation. Defaults to \'Original\'.')
parser.add_argument('-p', '--proxy', type=str, default='ProxyModel', help='Name of proxy implementation. Defaults to \'ProxyModel\'.')
parser.add_argument('--list', action='store_true', default=False, help='List options for all models.')
parser.add_argument('--no-graph', action='store_true', default=False, help='Disable graphical output.')
args = parser.parse_args()
def print_choices_list(context=None):
if context == 'workload' or context is None:
print('Workloads:')
for w in workload_model.predefined_workloads.keys():
print(' %s' % w)
if context == 'ratekeeper' or context is None:
print('\nRatekeeper models:')
for r in ratekeeper_model.predefined_ratekeeper.keys():
print(' %s' % r)
proxy_model_classes = [c for c in [getattr(proxy_model, a) for a in dir(proxy_model)] if inspect.isclass(c)]
if context == 'proxy' or context is None:
print('\nProxy models:')
for p in proxy_model_classes:
if issubclass(p, proxy_model.ProxyModel):
print(' %s' % p.__name__)
if context == 'limiter' or context is None:
print('\nProxy limiters:')
for p in proxy_model_classes:
if issubclass(p, proxy_model.Limiter) and p != proxy_model.Limiter:
name = p.__name__
if name.endswith('Limiter'):
name = name[0:-len('Limiter')]
print(' %s' % name)
if args.list:
print_choices_list()
sys.exit(0)
if args.workload is None or args.ratekeeper is None:
print('ERROR: A workload (-w/--workload) and ratekeeper model (-r/--ratekeeper) must be specified.\n')
print_choices_list()
sys.exit(1)
def validate_class_type(var, name, superclass):
cls = getattr(var, name, None)
return cls is not None and inspect.isclass(cls) and issubclass(cls, superclass)
if args.ratekeeper not in ratekeeper_model.predefined_ratekeeper:
print('Invalid ratekeeper model `%s\'' % args.ratekeeper)
print_choices_list('ratekeeper')
sys.exit(1)
if args.workload not in workload_model.predefined_workloads:
print('Invalid workload model `%s\'' % args.workload)
print_choices_list('workload')
sys.exit(1)
if not validate_class_type(proxy_model, args.proxy, proxy_model.ProxyModel):
print('Invalid proxy model `%s\'' % args.proxy)
print_choices_list('proxy')
sys.exit(1)
limiter_name = args.limiter
if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter):
limiter_name += 'Limiter'
if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter):
print('Invalid proxy limiter `%s\'' % args.limiter)
print_choices_list('limiter')
sys.exit(1)
ratekeeper = ratekeeper_model.predefined_ratekeeper[args.ratekeeper]
workload = workload_model.predefined_workloads[args.workload]
limiter = getattr(proxy_model, limiter_name)
proxy = getattr(proxy_model, args.proxy)(args.duration, ratekeeper, workload, limiter)
proxy.run()
for priority in workload.priorities():
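# Per-priority summary: latencies pooled across all one-second buckets; percentiles computed from the sorted pool.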
latencies = sorted([p for t in proxy.results.latencies[priority].values() for p in t])
total_started = sum(proxy.results.started[priority].values())
still_queued = sum([r.count for r in proxy.request_queue if r.priority == priority])
if len(latencies) > 0:
print('\n%s: %d requests in %d seconds (rate=%f). %d still queued.' % (priority, total_started, proxy.time, float(total_started)/proxy.time, still_queued))
print(' Median latency: %f' % latencies[len(latencies)//2])
print(' 90%% latency: %f' % latencies[int(0.9*len(latencies))])
print(' 99%% latency: %f' % latencies[int(0.99*len(latencies))])
print(' 99.9%% latency: %f' % latencies[int(0.999*len(latencies))])
print(' Max latency: %f' % latencies[-1])
print('')
if not args.no_graph:
plotter = Plotter(proxy.results)
plotter.display()
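# Hedged usage sketch (model names are defined in workload_model/ratekeeper_model, so the
# placeholders below are assumptions):
#   python3 grv_test.py --list
#   python3 grv_test.py -w <workload> -r <ratekeeper> -d 60 --no-graph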

107
contrib/grv_proxy_model/plot.py Executable file
View File

@ -0,0 +1,107 @@
#
# plot.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
class Plotter:
def __init__(self, results):
self.results = results
def add_plot(data, time_resolution, label, use_avg=False):
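# Bucket samples into bins of width time_resolution: each bin stores the sum of its samples, or their mean when use_avg is set.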
out_data = {}
counts = {}
for t in data.keys():
out_data.setdefault(t//time_resolution*time_resolution, 0)
counts.setdefault(t//time_resolution*time_resolution, 0)
out_data[t//time_resolution*time_resolution] += data[t]
counts[t//time_resolution*time_resolution] += 1
if use_avg:
out_data = { t: v/counts[t] for t,v in out_data.items() }
plt.plot(list(out_data.keys()), list(out_data.values()), label=label)
def add_plot_with_times(data, label):
plt.plot(list(data.keys()), list(data.values()), label=label)
def display(self, time_resolution=0.1):
plt.figure(figsize=(40,9))
plt.subplot(3, 3, 1)
for priority in self.results.started.keys():
Plotter.add_plot(self.results.started[priority], time_resolution, priority)
plt.xlabel('Time (s)')
plt.ylabel('Released/s')
plt.legend()
plt.subplot(3, 3, 2)
for priority in self.results.queued.keys():
Plotter.add_plot(self.results.queued[priority], time_resolution, priority)
plt.xlabel('Time (s)')
plt.ylabel('Requests/s')
plt.legend()
plt.subplot(3, 3, 3)
for priority in self.results.unprocessed_queue_sizes.keys():
data = {k: max(v) for (k,v) in self.results.unprocessed_queue_sizes[priority].items()}
Plotter.add_plot(data, time_resolution, priority)
plt.xlabel('Time (s)')
plt.ylabel('Max queue size')
plt.legend()
num = 4
for priority in self.results.latencies.keys():
plt.subplot(3, 3, num)
median_latencies = {k: v[int(0.5*len(v))] if len(v) > 0 else 0 for (k,v) in self.results.latencies[priority].items()}
percentile90_latencies = {k: v[int(0.9*len(v))] if len(v) > 0 else 0 for (k,v) in self.results.latencies[priority].items()}
max_latencies = {k: max(v) if len(v) > 0 else 0 for (k,v) in self.results.latencies[priority].items()}
Plotter.add_plot(median_latencies, time_resolution, 'median')
Plotter.add_plot(percentile90_latencies, time_resolution, '90th percentile')
Plotter.add_plot(max_latencies, time_resolution, 'max')
plt.xlabel('Time (s)')
plt.ylabel(str(priority) + ' Latency (s)')
plt.yscale('log')
plt.legend()
num += 1
for priority in self.results.rate.keys():
plt.subplot(3, 3, num)
if len(self.results.rate[priority]) > 0:
Plotter.add_plot(self.results.rate[priority], time_resolution, 'Rate', use_avg=True)
if len(self.results.released[priority]) > 0:
Plotter.add_plot(self.results.released[priority], time_resolution, 'Released', use_avg=True)
if len(self.results.limit[priority]) > 0:
Plotter.add_plot(self.results.limit[priority], time_resolution, 'Limit', use_avg=True)
if len(self.results.limit_and_budget[priority]) > 0:
Plotter.add_plot(self.results.limit_and_budget[priority], time_resolution, 'Limit and budget', use_avg=True)
if len(self.results.budget[priority]) > 0:
Plotter.add_plot(self.results.budget[priority], time_resolution, 'Budget', use_avg=True)
plt.xlabel('Time (s)')
plt.ylabel('Value (' + str(priority) + ')')
plt.legend()
num += 1
plt.show()

View File

@ -0,0 +1,40 @@
#
# priority.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
@functools.total_ordering
class Priority:
def __init__(self, priority_value, label):
self.priority_value = priority_value
self.label = label
def __lt__(self, other):
return self.priority_value < other.priority_value
def __str__(self):
return self.label
def __repr__(self):
return repr(self.label)
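# Singleton priority levels: a lower priority_value means higher priority (SYSTEM ranks above DEFAULT, which ranks above BATCH).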
Priority.SYSTEM = Priority(0, "System")
Priority.DEFAULT = Priority(1, "Default")
Priority.BATCH = Priority(2, "Batch")

View File

@ -0,0 +1,338 @@
#
# proxy_model.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
import heapq
from priority import Priority
from smoother import Smoother
@functools.total_ordering
class Task:
def __init__(self, time, fxn):
self.time = time
self.fxn = fxn
def __lt__(self, other):
return self.time < other.time
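# Limiter is the base class for the simulated GRV budget strategies; subclasses override the update_rate/update_limit/can_start/update_budget hooks.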
class Limiter:
class UpdateRateParams:
def __init__(self, time):
self.time = time
class UpdateLimitParams:
def __init__(self, time, elapsed):
self.time = time
self.elapsed = elapsed
class CanStartParams:
def __init__(self, time, num_started, count):
self.time = time
self.num_started = num_started
self.count = count
class UpdateBudgetParams:
def __init__(self, time, num_started, num_started_at_priority, min_priority, last_batch, queue_empty, elapsed):
self.time = time
self.num_started = num_started
self.num_started_at_priority = num_started_at_priority
self.min_priority = min_priority
self.last_batch = last_batch
self.queue_empty = queue_empty
self.elapsed = elapsed
def __init__(self, priority, ratekeeper_model, proxy_model):
self.priority = priority
self.ratekeeper_model = ratekeeper_model
self.proxy_model = proxy_model
self.limit = 0
self.rate = self.ratekeeper_model.get_limit(0, self.priority)
def update_rate(self, params):
pass
def update_limit(self, params):
pass
def can_start(self, params):
pass
def update_budget(self, params):
pass
class OriginalLimiter(Limiter):
def __init__(self, priority, limit_rate_model, proxy_model):
Limiter.__init__(self, priority, limit_rate_model, proxy_model)
def update_rate(self, params):
self.rate = self.ratekeeper_model.get_limit(params.time, self.priority)
def update_limit(self, params):
self.limit = min(0, self.limit) + params.elapsed * self.rate
self.limit = min(self.limit, self.rate * 0.01)
self.limit = min(self.limit, 100000)
self.proxy_model.results.rate[self.priority][params.time] = self.rate
self.proxy_model.results.limit[self.priority][params.time] = self.limit
def can_start(self, params):
return params.num_started < self.limit
def update_budget(self, params):
self.limit -= params.num_started
class PositiveBudgetLimiter(OriginalLimiter):
def __init__(self, priority, limit_rate_model, proxy_model):
OriginalLimiter.__init__(self, priority, limit_rate_model, proxy_model)
def update_limit(self, params):
self.limit += params.elapsed * self.rate
self.limit = min(self.limit, 2.0 * self.rate)
class ClampedBudgetLimiter(PositiveBudgetLimiter):
def __init__(self, priority, limit_rate_model, proxy_model):
PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model)
def update_budget(self, params):
min_budget = -self.rate * 5.0
if self.limit > min_budget:
self.limit = max(self.limit - params.num_started, min_budget)
class TimeLimiter(PositiveBudgetLimiter):
def __init__(self, priority, limit_rate_model, proxy_model):
PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model)
self.locked_until = 0
def can_start(self, params):
return params.time >= self.locked_until and PositiveBudgetLimiter.can_start(self, params)
def update_budget(self, params):
#print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch))
if params.min_priority >= self.priority or params.num_started < self.limit:
self.limit -= params.num_started
else:
self.limit = min(self.limit, max(self.limit - params.num_started, -params.last_batch))
self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + (params.num_started - self.limit)/self.rate)
#print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority))
class TimePositiveBudgetLimiter(PositiveBudgetLimiter):
def __init__(self, priority, limit_rate_model, proxy_model):
PositiveBudgetLimiter.__init__(self, priority, limit_rate_model, proxy_model)
self.locked_until = 0
def update_limit(self, params):
if params.time >= self.locked_until:
PositiveBudgetLimiter.update_limit(self, params)
def can_start(self, params):
return params.num_started + params.count <= self.limit
def update_budget(self, params):
#if params.num_started > 0:
#print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch))
if params.num_started > self.limit:
self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + (params.num_started - self.limit)/self.rate) # assumed overshoot term (num_started - limit), mirroring TimeLimiter.update_budget
self.limit = 0
else:
self.limit -= params.num_started
#if params.num_started > 0:
#print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority))
class SmoothingLimiter(OriginalLimiter):
def __init__(self, priority, limit_rate_model, proxy_model):
OriginalLimiter.__init__(self, priority, limit_rate_model, proxy_model)
self.smooth_released = Smoother(2)
self.smooth_rate_limit = Smoother(2)
self.rate_set = False
def update_rate(self, params):
OriginalLimiter.update_rate(self, params)
if not self.rate_set:
self.rate_set = True
self.smooth_rate_limit.reset(self.rate)
else:
self.smooth_rate_limit.set_total(params.time, self.rate)
def update_limit(self, params):
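# Budget is twice the smoothed headroom: the smoothed target rate minus the smoothed observed release rate.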
self.limit = 2.0 * (self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time))
def can_start(self, params):
return params.num_started + params.count <= self.limit
def update_budget(self, params):
self.smooth_released.add_delta(params.time, params.num_started)
class SmoothingBudgetLimiter(SmoothingLimiter):
def __init__(self, priority, limit_rate_model, proxy_model):
SmoothingLimiter.__init__(self, priority, limit_rate_model, proxy_model)
#self.smooth_filled = Smoother(2)
self.budget = 0
def update_limit(self, params):
release_rate = (self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time))
#self.smooth_filled.set_total(params.time, 1 if release_rate > 0 else 0)
self.limit = 2.0 * release_rate
self.proxy_model.results.rate[self.priority][params.time] = self.smooth_rate_limit.smooth_total(params.time)
self.proxy_model.results.released[self.priority][params.time] = self.smooth_released.smooth_rate(params.time)
self.proxy_model.results.limit[self.priority][params.time] = self.limit
self.proxy_model.results.limit_and_budget[self.priority][params.time] = self.limit + self.budget
self.proxy_model.results.budget[self.priority][params.time] = self.budget
#self.budget = max(0, self.budget + params.elapsed * self.smooth_rate_limit.smooth_total(params.time))
#if self.smooth_filled.smooth_total(params.time) >= 0.1:
#self.budget += params.elapsed * self.smooth_rate_limit.smooth_total(params.time)
#print('Update limit: time=%f, priority=%s, limit=%f, rate=%f, released=%f, budget=%f' % (params.time, self.priority, self.limit, self.smooth_rate_limit.smooth_total(params.time), self.smooth_released.smooth_rate(params.time), self.budget))
def can_start(self, params):
return params.num_started + params.count <= self.limit + self.budget #or params.num_started + params.count <= self.budget
def update_budget(self, params):
self.budget = max(0, self.budget + (self.limit - params.num_started_at_priority) / 2 * params.elapsed)
if params.queue_empty:
self.budget = min(10, self.budget)
self.smooth_released.add_delta(params.time, params.num_started_at_priority)
class ProxyModel:
class Results:
def __init__(self, priorities, duration):
self.started = self.init_result(priorities, 0, duration)
self.queued = self.init_result(priorities, 0, duration)
self.latencies = self.init_result(priorities, [], duration)
self.unprocessed_queue_sizes = self.init_result(priorities, [], duration)
self.rate = {p:{} for p in priorities}
self.released = {p:{} for p in priorities}
self.limit = {p:{} for p in priorities}
self.limit_and_budget = {p:{} for p in priorities}
self.budget = {p:{} for p in priorities}
def init_result(self, priorities, starting_value, duration):
return {p: {s: copy.copy(starting_value) for s in range(0, duration)} for p in priorities}
def __init__(self, duration, ratekeeper_model, workload_model, Limiter):
self.time = 0
self.log_time = 0
self.duration = duration
self.priority_limiters = { priority: Limiter(priority, ratekeeper_model, self) for priority in workload_model.priorities() }
self.workload_model = workload_model
self.request_scheduled = { p: False for p in self.workload_model.priorities()}
self.tasks = []
self.request_queue = []
self.results = ProxyModel.Results(self.workload_model.priorities(), duration)
def run(self):
self.update_rate()
self.process_requests(self.time)
for priority in self.workload_model.priorities():
next_request = self.workload_model.next_request(self.time, priority)
assert next_request is not None
heapq.heappush(self.tasks, Task(next_request.time, lambda next_request=next_request: self.receive_request(next_request)))
self.request_scheduled[priority] = True
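# Discrete-event loop: pop the earliest task from the heap, advance simulated time, and execute it until the duration elapses.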
while True:# or len(self.request_queue) > 0:
if int(self.time) > self.log_time:
self.log_time = int(self.time)
#print(self.log_time)
task = heapq.heappop(self.tasks)
self.time = task.time
if self.time >= self.duration:
break
task.fxn()
def update_rate(self):
for limiter in self.priority_limiters.values():
limiter.update_rate(Limiter.UpdateRateParams(self.time))
heapq.heappush(self.tasks, Task(self.time + 0.01, lambda: self.update_rate()))
def receive_request(self, request):
heapq.heappush(self.request_queue, request)
self.results.queued[request.priority][int(self.time)] += request.count
next_request = self.workload_model.next_request(self.time, request.priority)
if next_request is not None and next_request.time < self.duration:
heapq.heappush(self.tasks, Task(next_request.time, lambda: self.receive_request(next_request)))
else:
self.request_scheduled[request.priority] = False
def process_requests(self, last_time):
elapsed = self.time - last_time
for limiter in self.priority_limiters.values():
limiter.update_limit(Limiter.UpdateLimitParams(self.time, elapsed))
current_started = 0
started = {p:0 for p in self.workload_model.priorities()}
min_priority = Priority.SYSTEM
last_batch = 0
while len(self.request_queue) > 0:
request = self.request_queue[0]
if not self.priority_limiters[request.priority].can_start(Limiter.CanStartParams(self.time, current_started, request.count)):
break
min_priority = request.priority
last_batch = request.count
if self.workload_model.request_completed(request) and not self.request_scheduled[request.priority]:
next_request = self.workload_model.next_request(self.time, request.priority)
assert next_request is not None
heapq.heappush(self.tasks, Task(next_request.time, lambda next_request=next_request: self.receive_request(next_request)))
self.request_scheduled[request.priority] = True
current_started += request.count
started[request.priority] += request.count
heapq.heappop(self.request_queue)
self.results.started[request.priority][int(self.time)] += request.count
self.results.latencies[request.priority][int(self.time)].append(self.time-request.time)
if len(self.request_queue) == 0:
min_priority = Priority.BATCH
for priority, limiter in self.priority_limiters.items():
started_at_priority = sum([v for p,v in started.items() if p <= priority])
limiter.update_budget(Limiter.UpdateBudgetParams(self.time, current_started, started_at_priority, min_priority, last_batch, len(self.request_queue) == 0 or self.request_queue[0].priority > priority, elapsed))
for priority in self.workload_model.priorities():
self.results.unprocessed_queue_sizes[priority][int(self.time)].append(self.workload_model.workload_models[priority].outstanding)
current_time = self.time
delay = 0.001
heapq.heappush(self.tasks, Task(self.time + delay, lambda: self.process_requests(current_time)))
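A minimal driver sketch (mine, not part of the commit) showing how the pieces are wired together; MyLimiter is a hypothetical stand-in for one of the limiter classes defined earlier in this file:

# Sketch only: assumes this script's Task and limiter classes are in scope.
from ratekeeper import predefined_ratekeeper
from workload_model import predefined_workloads

model = ProxyModel(
    duration=120,  # simulated seconds
    ratekeeper_model=predefined_ratekeeper['default200_batch100'],
    workload_model=predefined_workloads['fixed_uniform'],
    Limiter=MyLimiter)  # hypothetical; substitute a concrete limiter class
model.run()
# run() returns nothing; per-priority, per-second results accumulate on model.results.
total_started = sum(sum(model.results.started[p].values())
                    for p in model.workload_model.priorities())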

View File

@ -0,0 +1,83 @@
#
# rate_model.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
class RateModel:
def __init__(self):
pass
def get_rate(self, time):
pass
class FixedRateModel(RateModel):
def __init__(self, rate):
RateModel.__init__(self)
self.rate = rate
def get_rate(self, time):
return self.rate
class UnlimitedRateModel(FixedRateModel):
def __init__(self):
FixedRateModel.__init__(self, 1e9)
class IntervalRateModel(RateModel):
def __init__(self, intervals):
self.intervals = sorted(intervals)
def get_rate(self, time):
if len(self.intervals) == 0 or time < self.intervals[0][0]:
return 0
target_interval = len(self.intervals)-1
for i in range(1, len(self.intervals)):
if time < self.intervals[i][0]:
target_interval = i-1
break
self.intervals = self.intervals[target_interval:]
return self.intervals[0][1]
class SawtoothRateModel(RateModel):
def __init__(self, low, high, frequency):
self.low = low
self.high = high
self.frequency = frequency
def get_rate(self, time):
if int(2*time/self.frequency) % 2 == 0:
return self.low
else:
return self.high
class DistributionRateModel(RateModel):
def __init__(self, distribution, frequency):
self.distribution = distribution
self.frequency = frequency
self.last_change = 0
self.rate = None
def get_rate(self, time):
if self.frequency == 0 or int((time - self.last_change) / self.frequency) > int(self.last_change / self.frequency) or self.rate is None:
self.last_change = time
self.rate = self.distribution()
return self.rate
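For intuition, a small usage sketch (mine, not part of the commit) sampling one of these models over time:

# Sketch: the "sawtooth" is really a square wave that switches between
# low and high every half period.
sawtooth = SawtoothRateModel(low=10, high=200, frequency=1)
for t in [0.0, 0.25, 0.5, 0.75, 1.0]:
    print(t, sawtooth.get_rate(t))  # prints 10, 10, 200, 200, 10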

View File

@ -0,0 +1,67 @@
#
# ratekeeper.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
import rate_model
from priority import Priority
class RatekeeperModel:
def __init__(self, limit_models):
self.limit_models = limit_models
def get_limit(self, time, priority):
return self.limit_models[priority].get_rate(time)
predefined_ratekeeper = {}
predefined_ratekeeper['default200_batch100'] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.FixedRateModel(200),
Priority.BATCH: rate_model.FixedRateModel(100)
})
predefined_ratekeeper['default_sawtooth'] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.SawtoothRateModel(10, 200, 1),
Priority.BATCH: rate_model.FixedRateModel(0)
})
predefined_ratekeeper['default_uniform_random'] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.DistributionRateModel(lambda: numpy.random.uniform(10, 200), 1),
Priority.BATCH: rate_model.FixedRateModel(0)
})
predefined_ratekeeper['default_trickle'] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.FixedRateModel(3),
Priority.BATCH: rate_model.FixedRateModel(0)
})
predefined_ratekeeper['default1000'] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.FixedRateModel(1000),
Priority.BATCH: rate_model.FixedRateModel(500)
})
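These predefined models are simple lookup tables over the per-priority rate models; an illustrative query (assumed usage, not part of the commit):

# Sketch: Priority is already imported at the top of this file.
rk = predefined_ratekeeper['default200_batch100']
print(rk.get_limit(5.0, Priority.DEFAULT))  # 200 (fixed rate)
print(rk.get_limit(5.0, Priority.SYSTEM))   # 1e9 (effectively unlimited)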

View File

@ -0,0 +1,53 @@
#
# smoother.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
class Smoother:
def __init__(self, folding_time):
self.folding_time = folding_time
self.reset(0)
def reset(self, value):
self.time = 0
self.total = value
self.estimate = value
def set_total(self, time, total):
self.add_delta(time, total-self.total)
def add_delta(self, time, delta):
self.update(time)
self.total += delta
def smooth_total(self, time):
self.update(time)
return self.estimate
def smooth_rate(self, time):
self.update(time)
return (self.total-self.estimate) / self.folding_time
def update(self, time):
elapsed = time - self.time
if elapsed > 0:
self.time = time
self.estimate += (self.total-self.estimate) * (1-math.exp(-elapsed/self.folding_time))
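For reference, the update above is the closed-form step of an exponential moving average. In my notation (T = total, E = estimate, \tau = folding_time, \Delta t = elapsed):

E \leftarrow E + (T - E)\left(1 - e^{-\Delta t/\tau}\right)
\quad\Longleftrightarrow\quad
(T - E) \leftarrow (T - E)\, e^{-\Delta t/\tau},
\qquad
\mathrm{smooth\_rate} = \frac{T - E}{\tau}

Since the residual T - E simply decays by e^{-\Delta t/\tau}, two consecutive updates over \Delta t_1 and \Delta t_2 compose to a single update over \Delta t_1 + \Delta t_2, so the estimate does not depend on how often update() is called.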

View File

@ -0,0 +1,201 @@
#
# workload_model.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import numpy
import math
import rate_model
from priority import Priority
@functools.total_ordering
class Request:
def __init__(self, time, count, priority):
self.time = time
self.count = count
self.priority = priority
def __lt__(self, other):
return self.priority < other.priority
class PriorityWorkloadModel:
def __init__(self, priority, rate_model, batch_model, generator, max_outstanding=1e9):
self.priority = priority
self.rate_model = rate_model
self.batch_model = batch_model
self.generator = generator
self.max_outstanding = max_outstanding
self.outstanding = 0
def next_request(self, time):
if self.outstanding >= self.max_outstanding:
return None
batch_size = self.batch_model.next_batch()
self.outstanding += batch_size
interval = self.generator.next_request_interval(self.rate_model.get_rate(time))
return Request(time + interval, batch_size, self.priority)
def request_completed(self, request):
was_full = self.max_outstanding <= self.outstanding
self.outstanding -= request.count
return was_full and self.outstanding < self.max_outstanding
class WorkloadModel:
def __init__(self, workload_models):
self.workload_models = workload_models
def priorities(self):
return list(self.workload_models.keys())
def next_request(self, time, priority):
return self.workload_models[priority].next_request(time)
def request_completed(self, request):
return self.workload_models[request.priority].request_completed(request)
class Distribution:
EXPONENTIAL = lambda x: numpy.random.exponential(x)
UNIFORM = lambda x: numpy.random.uniform(0, 2.0*x)
FIXED = lambda x: x
class BatchGenerator:
def __init__(self):
pass
def next_batch(self):
pass
class DistributionBatchGenerator(BatchGenerator):
def __init__(self, distribution, size):
BatchGenerator.__init__(self)
self.distribution = distribution
self.size = size
def next_batch(self):
return math.ceil(self.distribution(self.size))
class RequestGenerator:
def __init__(self):
pass
def next_request_interval(self, rate):
pass
class DistributionRequestGenerator(RequestGenerator):
def __init__(self, distribution):
RequestGenerator.__init__(self)
self.distribution = distribution
def next_request_interval(self, rate):
if rate == 0:
return 1e9
return self.distribution(1.0/rate)
predefined_workloads = {}
predefined_workloads['slow_exponential'] = WorkloadModel(
{
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.FixedRateModel(100),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.EXPONENTIAL),
max_outstanding=100
)
})
predefined_workloads['fixed_uniform'] = WorkloadModel(
{
Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=10
),
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.FixedRateModel(95),
DistributionBatchGenerator(Distribution.FIXED, 10),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=200
),
Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
rate_model.FixedRateModel(1),
DistributionBatchGenerator(Distribution.UNIFORM, 500),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=200
)
})
predefined_workloads['batch_starvation'] = WorkloadModel(
{
Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
rate_model.FixedRateModel(1),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=10
),
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.IntervalRateModel([(0,50), (60,150), (120,90)]),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=200
),
Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
rate_model.FixedRateModel(100),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=200
)
})
predefined_workloads['default_low_high_low'] = WorkloadModel(
{
Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=10
),
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.IntervalRateModel([(0,100), (60,300), (120,100)]),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=200
),
Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.UNIFORM),
max_outstanding=200
)
})
for rate in [83, 100, 180, 190, 200]:
predefined_workloads['default%d' % rate] = WorkloadModel(
{
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.FixedRateModel(rate),
DistributionBatchGenerator(Distribution.FIXED, 1),
DistributionRequestGenerator(Distribution.EXPONENTIAL),
max_outstanding=1000
)
})
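A small illustrative loop (assumed usage, not part of the commit) showing how a workload model emits a stream of timed requests:

# Sketch: next_request returns None once max_outstanding is reached,
# until request_completed frees capacity.
wm = predefined_workloads['slow_exponential']
t = 0.0
for _ in range(3):
    req = wm.next_request(t, Priority.DEFAULT)
    if req is None:
        break
    print(req.time, req.count, req.priority)
    t = req.time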

View File

@ -20,7 +20,7 @@ Consequently, the special-key-space framework wants to integrate all client func
If your feature is exposing information to clients and the results are easily formatted as key-value pairs, then you can use special-key-space to implement your client function.
## How
If you choose to use it, you need to implement a function class that inherits from `SpecialKeyRangeReadImpl`, which has an abstract method `Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr)`.
If you choose to use it, you need to implement a function class that inherits from `SpecialKeyRangeReadImpl`, which has an abstract method `Future<RangeResult> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr)`.
This method can be treated as a callback, whose implementation details are determined by the developer.
Once you fill out the method, register the function class to the corresponding key range.
Below is a detailed example.
@ -38,10 +38,10 @@ public:
CountryToCapitalCity[LiteralStringRef("China")] = LiteralStringRef("Beijing");
}
// Implement the getRange interface
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw,
Future<RangeResult> getRange(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) const override {
Standalone<RangeResultRef> result;
RangeResult result;
for (auto const& country : CountryToCapitalCity) {
// the registered range here: [\xff\xff/example/, \xff\xff/example/\xff]
Key keyWithPrefix = country.first.withPrefix(range.begin);
@ -71,7 +71,7 @@ ASSERT(res1.present() && res1.getValue() == LiteralStringRef("Tokyo"));
// getRange
// Note: for getRange(key1, key2), both key1 and key2 should prefixed with \xff\xff
// something like getRange("normal_key", "\xff\xff/...") is not supported yet
Standalone<RangeResultRef> res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff")));
RangeResult res2 = wait(tr.getRange(LiteralStringRef("\xff\xff/example/U"), LiteralStringRef("\xff\xff/example/U\xff")));
// res2 should contain USA and UK
ASSERT(
res2.size() == 2 &&

View File

@ -799,3 +799,18 @@ Upgrading from Older Versions
-----------------------------
Upgrades from versions older than 5.0.0 are no longer supported.
Version-specific notes on downgrading
=====================================
In general, downgrades between non-patch releases (e.g., from 6.2.x to 6.1.x) are not supported.
.. _downgrade-specific-version:
Downgrading from 6.3.13 to 6.2.33
---------------------------------
After upgrading from 6.2 to 6.3, rolling back and downgrading to 6.2 remains possible, provided that the following conditions are met (see the sketch after this list for one way to check the storage engine):
* The 6.3 cluster cannot have ``TLogVersion`` greater than V4 (6.2).
* The 6.3 cluster cannot use storage engine types that are not ``ssd-1``, ``ssd-2``, or ``memory``.
* The 6.3 cluster must not have any key servers serialized with tag encoding. This condition can only be guaranteed if the ``TAG_ENCODE_KEY_SERVERS`` knob has never been changed to ``true`` on this cluster.
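A verification sketch (assumed usage, not part of the commit) for checking the storage engine condition via machine-readable status:

# Sketch: assumes fdbcli is on PATH and the default cluster file is used.
import json, subprocess
status = json.loads(subprocess.check_output(['fdbcli', '--exec', 'status json']))
print(status['cluster']['configuration']['storage_engine'])  # expect ssd-1, ssd-2, or memory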

View File

@ -133,7 +133,7 @@ API versioning
Prior to including ``fdb_c.h``, you must define the ``FDB_API_VERSION`` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
.. function:: fdb_error_t fdb_select_api_version(int version)

View File

@ -148,7 +148,7 @@
.. |atomic-versionstamps-tuple-warning-value| replace::
At this time, versionstamped values are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.
.. |api-version| replace:: 700
.. |api-version| replace:: 710
.. |streaming-mode-blurb1| replace::
When using |get-range-func| and similar interfaces, API clients can request large ranges of the database to iterate over. Making such a request doesn't necessarily mean that the client will consume all of the data in the range - sometimes the client doesn't know how far it intends to iterate in advance. FoundationDB tries to balance latency and bandwidth by requesting data for iteration in batches.
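As a concrete illustration (mine, not from this commit), the Python binding exposes this batching through the streaming mode passed to get_range:

import fdb
fdb.api_version(710)
db = fdb.open()

@fdb.transactional
def scan(tr):
    # want_all hints that the whole range will be consumed, so the client
    # fetches large batches; the default iterator mode starts small and grows.
    return list(tr.get_range(b'a', b'z', streaming_mode=fdb.StreamingMode.want_all))

rows = scan(db)  # the decorator creates and commits the transaction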

View File

@ -108,7 +108,7 @@ Opening a database
After importing the ``fdb`` module and selecting an API version, you probably want to open a :class:`Database` using :func:`open`::
import fdb
fdb.api_version(700)
fdb.api_version(710)
db = fdb.open()
.. function:: open( cluster_file=None, event_model=None )

View File

@ -93,7 +93,7 @@ Opening a database
After requiring the ``FDB`` gem and selecting an API version, you probably want to open a :class:`Database` using :func:`open`::
require 'fdb'
FDB.api_version 700
FDB.api_version 710
db = FDB.open
.. function:: open( cluster_file=nil ) -> Database

View File

@ -9,6 +9,14 @@ This document provides an overview of changes that an application developer may
For more details about API versions, see :ref:`api-versions`.
.. _api-version-upgrade-guide-710:
API version 710
===============
General
-------
.. _api-version-upgrade-guide-700:
API version 700

View File

@ -29,7 +29,7 @@ Before using the API, we need to specify the API version. This allows programs t
.. code-block:: go
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file <default-cluster-file>`.
@ -78,7 +78,7 @@ If this is all working, it looks like we are ready to start building a real appl
func main() {
// Different API versions may expose different runtime behaviors.
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
// Open the default database from the system cluster
db := fdb.MustOpenDefault()
@ -666,7 +666,7 @@ Here's the code for the scheduling tutorial:
}
func main() {
fdb.MustAPIVersion(700)
fdb.MustAPIVersion(710)
db := fdb.MustOpenDefault()
db.Options().SetTransactionTimeout(60000) // 60,000 ms = 1 minute
db.Options().SetTransactionRetryLimit(100)

View File

@ -30,7 +30,7 @@ Before using the API, we need to specify the API version. This allows programs t
private static final Database db;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
}
@ -66,7 +66,7 @@ If this is all working, it looks like we are ready to start building a real appl
private static final Database db;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
}
@ -441,7 +441,7 @@ Here's the code for the scheduling tutorial:
private static final Database db;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
db.options().setTransactionTimeout(60000); // 60,000 ms = 1 minute
db.options().setTransactionRetryLimit(100);

View File

@ -23,7 +23,7 @@ Open a Ruby interactive interpreter and import the FoundationDB API module::
Before using the API, we need to specify the API version. This allows programs to maintain compatibility even if the API is modified in future versions::
> FDB.api_version 700
> FDB.api_version 710
=> nil
Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file <default-cluster-file>`. ::
@ -46,7 +46,7 @@ If this is all working, it looks like we are ready to start building a real appl
.. code-block:: ruby
require 'fdb'
FDB.api_version 700
FDB.api_version 710
@db = FDB.open
@db['hello'] = 'world'
print 'hello ', @db['hello']
@ -373,7 +373,7 @@ Here's the code for the scheduling tutorial:
require 'fdb'
FDB.api_version 700
FDB.api_version 710
####################################
## Initialization ##

View File

@ -30,7 +30,7 @@ Open a Python interactive interpreter and import the FoundationDB API module::
Before using the API, we need to specify the API version. This allows programs to maintain compatibility even if the API is modified in future versions::
>>> fdb.api_version(700)
>>> fdb.api_version(710)
Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file <default-cluster-file>`. ::
@ -48,7 +48,7 @@ When this command returns without exception, the modification is durably stored
If this is all working, it looks like we are ready to start building a real application. For reference, here's the full code for "hello world"::
import fdb
fdb.api_version(700)
fdb.api_version(710)
db = fdb.open()
db[b'hello'] = b'world'
print 'hello', db[b'hello']
@ -91,7 +91,7 @@ FoundationDB includes a few tools that make it easy to model data using this app
opening a :ref:`directory <developer-guide-directories>` in the database::
import fdb
fdb.api_version(700)
fdb.api_version(710)
db = fdb.open()
scheduling = fdb.directory.create_or_open(db, ('scheduling',))
@ -337,7 +337,7 @@ Here's the code for the scheduling tutorial::
import fdb
import fdb.tuple
fdb.api_version(700)
fdb.api_version(710)
####################################

View File

@ -69,7 +69,7 @@ Here's a basic implementation of the recipe.
private static final long EMPTY_ARRAY = -1;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
docSpace = new Subspace(Tuple.from("D"));
}

View File

@ -171,6 +171,18 @@
"p99":0.0,
"p99.9":0.0
},
"commit_batching_window_size":{
"count":0,
"min":0.0,
"max":0.0,
"median":0.0,
"mean":0.0,
"p25":0.0,
"p90":0.0,
"p95":0.0,
"p99":0.0,
"p99.9":0.0
},
"grv_latency_bands":{ // How many GRV requests belong to the latency (in seconds) band (e.g., How many requests belong to [0.01,0.1] latency band). The key is the upper bound of the band and the lower bound is the next smallest band (or 0, if none). Example: {0.01: 27, 0.1: 18, 1: 1, inf: 98,filtered: 10}, we have 18 requests in [0.01, 0.1) band.
"$map_key=upperBoundOfBand": 1
},

View File

@ -74,7 +74,7 @@ Here's a simple implementation of multimaps with multisets as described:
private static final int N = 100;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
multi = new Subspace(Tuple.from("M"));
}

View File

@ -74,7 +74,7 @@ Here's a basic implementation of the model:
private static final Random randno;
static{
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
pq = new Subspace(Tuple.from("P"));

View File

@ -73,7 +73,7 @@ The following is a simple implementation of the basic pattern:
private static final Random randno;
static{
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
queue = new Subspace(Tuple.from("Q"));
randno = new Random();

View File

@ -8,6 +8,7 @@ Release Notes
* Fix backup agent stall when writing to local filesystem with slow metadata operations. `(PR #4428) <https://github.com/apple/foundationdb/pull/4428>`_
* Backup agent no longer uses 4k block caching layer on local output files so that write operations are larger. `(PR #4428) <https://github.com/apple/foundationdb/pull/4428>`_
* Fix accounting error that could cause commits to incorrectly fail with ``proxy_memory_limit_exceeded``. `(PR #4529) <https://github.com/apple/foundationdb/pull/4529>`_
* Added support for downgrades from FDB version 6.3. For more details, see the :ref:`administration notes <downgrade-specific-version>`. `(PR #4673) <https://github.com/apple/foundationdb/pull/4673>`_ `(PR #4469) <https://github.com/apple/foundationdb/pull/4469>`_
6.2.32
======

View File

@ -2,6 +2,10 @@
Release Notes
#############
6.3.13
======
* The multi-version client now requires at most two client connections with version 6.2 or larger, regardless of how many external clients are configured. Clients older than 6.2 will continue to create an additional connection each. `(PR #4667) <https://github.com/apple/foundationdb/pull/4667>`_
6.3.12
======
* Change the default for ``--knob_tls_server_handshake_threads`` to 64. The previous default was 1000. This avoids starting 1000 threads by default, but may adversely affect recovery time for large clusters using TLS. Users with large TLS clusters should consider explicitly setting this knob in their foundationdb.conf file, as sketched below. `(PR #4421) <https://github.com/apple/foundationdb/pull/4421>`_
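A sketch of that explicit setting in foundationdb.conf (illustrative value only; the option name mirrors the command-line knob):

[fdbserver]
knob_tls_server_handshake_threads = 64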

View File

@ -15,7 +15,8 @@ Features
Performance
-----------
* Increased performance of dr_agent when copying the mutation log. The ``COPY_LOG_BLOCK_SIZE``, ``COPY_LOG_BLOCKS_PER_TASK``, ``COPY_LOG_PREFETCH_BLOCKS``, ``COPY_LOG_READ_AHEAD_BYTES`` and ``COPY_LOG_TASK_DURATION_NANOS`` knobs can be set. `(PR 3436) <https://github.com/apple/foundationdb/pull/3436>`_
* Increased performance of dr_agent when copying the mutation log. The ``COPY_LOG_BLOCK_SIZE``, ``COPY_LOG_BLOCKS_PER_TASK``, ``COPY_LOG_PREFETCH_BLOCKS``, ``COPY_LOG_READ_AHEAD_BYTES`` and ``COPY_LOG_TASK_DURATION_NANOS`` knobs can be set. `(PR #3436) <https://github.com/apple/foundationdb/pull/3436>`_
* Reduced the number of connections required by the multi-version client when loading external clients. When connecting to 7.0 clusters, only one connection with version 6.2 or larger will be used. With older clusters, at most two connections with version 6.2 or larger will be used. Clients older than version 6.2 will continue to create an additional connection each. `(PR #4667) <https://github.com/apple/foundationdb/pull/4667>`_
Reliability
-----------
@ -29,7 +30,7 @@ Fixes
Status
------
* Added ``commit_batching_window_size`` to the proxy roles section of status to record statistics about commit batching window size on each proxy. `(PR #4735) <https://github.com/apple/foundationdb/pull/4735>`_
Bindings

View File

@ -87,7 +87,7 @@ In this example, we're storing user data based on user ID but sometimes need t
private static final Subspace index;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
main = new Subspace(Tuple.from("user"));
index = new Subspace(Tuple.from("zipcode_index"));

View File

@ -62,7 +62,7 @@ Here's a simple implementation of the basic table pattern:
private static final Subspace colIndex;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
table = new Subspace(Tuple.from("T"));
rowIndex = table.subspace(Tuple.from("R"));

View File

@ -77,7 +77,7 @@ Here's the basic pattern:
private static final Subspace vector;
static {
fdb = FDB.selectAPIVersion(700);
fdb = FDB.selectAPIVersion(710);
db = fdb.open();
vector = new Subspace(Tuple.from("V"));
}

View File

@ -366,7 +366,7 @@ ACTOR Future<Void> fdbClient() {
// 3. write 10 values in [k, k+100]
beginIdx = deterministicRandom()->randomInt(0, 1e8 - 100);
startKey = keyPrefix + std::to_string(beginIdx);
Standalone<RangeResultRef> range = wait(tx.getRange(KeyRangeRef(startKey, endKey), 100));
RangeResult range = wait(tx.getRange(KeyRangeRef(startKey, endKey), 100));
for (int i = 0; i < 10; ++i) {
Key k = Key(keyPrefix + std::to_string(beginIdx + deterministicRandom()->randomInt(0, 100)));
tx.set(k, LiteralStringRef("foo"));

View File

@ -1576,7 +1576,7 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
state Reference<ReadYourWritesTransaction> tr2(new ReadYourWritesTransaction(dest));
tr2->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr2->setOption(FDBTransactionOptions::LOCK_AWARE);
state Standalone<RangeResultRef> tagNames = wait(tr2->getRange(dba.tagNames.range(), 10000, snapshot));
state RangeResult tagNames = wait(tr2->getRange(dba.tagNames.range(), 10000, snapshot));
state std::vector<Future<Optional<Key>>> backupVersion;
state std::vector<Future<EBackupState>> backupStatus;
state std::vector<Future<int64_t>> tagRangeBytesDR;
@ -1638,7 +1638,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr,
std::string name,
std::string id,
int limit = 1) {
state Standalone<RangeResultRef> docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
state RangeResult docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
state bool readMore = false;
state int i;
for (i = 0; i < docs.size(); ++i) {
@ -1667,7 +1667,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr,
}
if (readMore) {
limit = 10000;
Standalone<RangeResultRef> docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
RangeResult docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
docs = std::move(docs2);
readMore = false;
}
@ -1684,7 +1684,7 @@ ACTOR Future<json_spirit::mObject> getLayerStatus(Database src, std::string root
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
state Standalone<RangeResultRef> kvPairs =
state RangeResult kvPairs =
wait(tr.getRange(KeyRangeRef(rootKey, strinc(rootKey)), GetRangeLimits::ROW_LIMIT_UNLIMITED));
json_spirit::mObject statusDoc;
JSONDoc modifier(statusDoc);
@ -4246,4 +4246,4 @@ int main(int argc, char* argv[]) {
}
flushAndExit(status);
}
}

View File

@ -3633,7 +3633,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
if (tokencmp(tokens[0], "kill")) {
getTransaction(db, tr, options, intrans);
if (tokens.size() == 1) {
Standalone<RangeResultRef> kvs = wait(
RangeResult kvs = wait(
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY)));
@ -3700,7 +3700,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
if (tokencmp(tokens[0], "suspend")) {
getTransaction(db, tr, options, intrans);
if (tokens.size() == 1) {
Standalone<RangeResultRef> kvs = wait(
RangeResult kvs = wait(
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY)));
@ -3911,7 +3911,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue;
}
getTransaction(db, tr, options, intrans);
Standalone<RangeResultRef> kvs = wait(
RangeResult kvs = wait(
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY)));
@ -3940,7 +3940,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue;
}
getTransaction(db, tr, options, intrans);
Standalone<RangeResultRef> kvs = wait(makeInterruptable(
RangeResult kvs = wait(makeInterruptable(
tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY)));
@ -4019,7 +4019,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue;
}
getTransaction(db, tr, options, intrans);
Standalone<RangeResultRef> kvs = wait(
RangeResult kvs = wait(
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY)));
@ -4061,7 +4061,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
if (tokencmp(tokens[0], "expensive_data_check")) {
getTransaction(db, tr, options, intrans);
if (tokens.size() == 1) {
Standalone<RangeResultRef> kvs = wait(
RangeResult kvs = wait(
makeInterruptable(tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
LiteralStringRef("\xff\xff/worker_interfaces0")),
CLIENT_KNOBS->TOO_MANY)));
@ -4177,7 +4177,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
endKey = strinc(tokens[1]);
}
Standalone<RangeResultRef> kvs = wait(makeInterruptable(
RangeResult kvs = wait(makeInterruptable(
getTransaction(db, tr, options, intrans)->getRange(KeyRangeRef(tokens[1], endKey), limit)));
printf("\nRange limited to %d keys\n", limit);

Some files were not shown because too many files have changed in this diff.