diff --git a/Makefile b/Makefile index de59197f71..cf8fcb228c 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ else ifeq ($(PLATFORM),Darwin) CXX := /usr/bin/clang CFLAGS += -mmacosx-version-min=10.7 -stdlib=libc++ - CXXFLAGS += -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option + CXXFLAGS += -mmacosx-version-min=10.7 -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option .LIBPATTERNS := lib%.dylib lib%.a diff --git a/bindings/bindingtester/known_testers.py b/bindings/bindingtester/known_testers.py index d00f2faf55..a8d2c6b9a1 100644 --- a/bindings/bindingtester/known_testers.py +++ b/bindings/bindingtester/known_testers.py @@ -63,6 +63,6 @@ testers = { 'java_async' : Tester('java', _java_cmd + 'AsyncStackTester', 2040, 500, MAX_API_VERSION), 'java_completable' : Tester('java', _java_completable_cmd + 'StackTester', 2040, 500, MAX_API_VERSION), 'java_completable_async' : Tester('java', _java_completable_cmd + 'AsyncStackTester', 2040, 500, MAX_API_VERSION), - 'go' : Tester('go', _absolute_path('go/bin/_stacktester'), 63, 200, MAX_API_VERSION), + 'go' : Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION), 'flow' : Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION), } diff --git a/bindings/bindingtester/run_binding_tester.sh b/bindings/bindingtester/run_binding_tester.sh old mode 100755 new mode 100644 diff --git a/bindings/c/local.mk b/bindings/c/local.mk index 4df783d5c1..baa757c16a 100644 --- a/bindings/c/local.mk +++ b/bindings/c/local.mk @@ -23,14 +23,18 @@ fdb_c_CFLAGS := $(fdbclient_CFLAGS) fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS) fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a +fdb_c_tests_LIBS := -Llib -lfdb_c +fdb_c_tests_HEADERS := -Ibindings/c ifeq ($(PLATFORM),linux) fdb_c_LIBS += lib/libstdc++.a -lm -lpthread -lrt -ldl fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete + fdb_c_tests_LIBS += -lpthread endif ifeq ($(PLATFORM),osx) fdb_c_LDFLAGS += -lc++ -Xlinker -exported_symbols_list -Xlinker bindings/c/fdb_c.symbols + fdb_c_tests_LIBS += -lpthread lib/libfdb_c.dylib: bindings/c/fdb_c.symbols @@ -74,3 +78,24 @@ fdb_c_BUILD_SOURCES += bindings/c/fdb_c.g.S bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexillographer/fdb.options $(ALL_MAKEFILES) @echo "Building $@" @$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options c $@ + +bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c + @echo "Compiling fdb_c_performance_test" + @$(CC) $(CFLAGS) $(fdb_c_tests_LIBS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c + +bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c + @echo "Compiling fdb_c_ryw_benchmark" + @$(CC) $(CFLAGS) $(fdb_c_tests_LIBS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c + +packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark + @echo "Packaging $@" + @rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM) + @mkdir -p packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin + @cp bin/fdb_c_performance_test packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin + @cp bin/fdb_c_ryw_benchmark packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin + @tar -C packages -czvf $@ fdb-c-tests-$(VERSION)-$(PLATFORM) > /dev/null + @rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM) + +fdb_c_tests: 
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz + +packages: fdb_c_tests diff --git a/bindings/c/test/performance_test.c b/bindings/c/test/performance_test.c new file mode 100644 index 0000000000..aef1926f77 --- /dev/null +++ b/bindings/c/test/performance_test.c @@ -0,0 +1,623 @@ +/* + * performance_test.c + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "test.h" +#include +#include + +#include +#include + +pthread_t netThread; + +int numKeys = 1000000; +int keySize = 16; +uint8_t** keys = NULL; +int valueSize = 100; +uint8_t *valueStr = NULL; + +fdb_error_t waitError(FDBFuture *f) { + fdb_error_t blockError = fdb_future_block_until_ready(f); + if(!blockError) { + return fdb_future_get_error(f); + } else { + return blockError; + } +} + +struct RunResult run(struct ResultSet *rs, FDBDatabase *db, struct RunResult (*func)(struct ResultSet*, FDBTransaction*)) { + FDBTransaction *tr = NULL; + checkError(fdb_database_create_transaction(db, &tr), "create transaction", rs); + fdb_error_t e = fdb_database_create_transaction(db, &tr); + checkError(e, "create transaction", rs); + + while(1) { + struct RunResult r = func(rs, tr); + e = r.e; + if(!e) { + FDBFuture *f = fdb_transaction_commit(tr); + e = waitError(f); + fdb_future_destroy(f); + } + + if(e) { + FDBFuture *f = fdb_transaction_on_error(tr, e); + fdb_error_t retryE = waitError(f); + fdb_future_destroy(f); + if (retryE) { + return (struct RunResult) {0, retryE}; + } + } else { + return r; + } + } + + return RES(0, 4100); // internal_error ; we should never get here +} + +int runTest(struct RunResult (*testFxn)(struct ResultSet*, FDBTransaction*), FDBDatabase *db, struct ResultSet *rs, const char *kpiName) { + int numRuns = 25; + int *results = malloc(sizeof(int)*numRuns); + int i = 0; + for(; i < numRuns; ++i) { + struct RunResult res = run(rs, db, testFxn); + if(res.e) { + logError(res.e, kpiName, rs); + free(results); + return 0; + } + results[i] = res.res; + if(results[i] < 0) { + free(results); + return -1; + } + } + + int result = median(results, numRuns); + free(results); + + addKpi(rs, kpiName, result, "keys/s"); + + return result; +} + +int runTestDb(struct RunResult (*testFxn)(struct ResultSet*, FDBDatabase*), FDBDatabase *db, struct ResultSet *rs, const char *kpiName) { + int numRuns = 25; + int *results = malloc(sizeof(int)*numRuns); + int i = 0; + for(; i < numRuns; ++i) { + struct RunResult res = testFxn(rs, db); + if(res.e) { + logError(res.e, kpiName, rs); + free(results); + return 0; + } + results[i] = res.res; + if(results[i] < 0) { + free(results); + return -1; + } + } + + int result = median(results, numRuns); + free(results); + + addKpi(rs, kpiName, result, "keys/s"); + + return result; +} + + +struct RunResult clearAll(struct ResultSet *rs, FDBTransaction *tr) { + fdb_transaction_clear_range(tr, (uint8_t*)"", 0, (uint8_t*)"\xff", 1); + return 
RES(0, 0); +} + +uint32_t start = 0; +uint32_t stop = 0; +struct RunResult insertRange(struct ResultSet *rs, FDBTransaction *tr) { + int i; + for(i = start; i < stop; i++) { + fdb_transaction_set(tr, keys[i], keySize, valueStr, valueSize); + } + return RES(0, 0); +} + +void insertData(struct ResultSet *rs, FDBDatabase *db) { + checkError(run(rs, db, &clearAll).e, "clearing database", rs); + + // TODO: Do this asynchronously. + start = 0; + while(start < numKeys) { + stop = start + 1000; + if(stop > numKeys) stop = numKeys; + checkError(run(rs, db, &insertRange).e, "inserting data range", rs); + start = stop; + } +} + +fdb_error_t setRetryLimit(struct ResultSet *rs, FDBTransaction *tr, uint64_t limit) { + return fdb_transaction_set_option(tr, FDB_TR_OPTION_RETRY_LIMIT, (const uint8_t*)&limit, sizeof(uint64_t)); +} + +uint32_t FUTURE_LATENCY_COUNT = 100000; +const char *FUTURE_LATENCY_KPI = "C future throughput (local client)"; +struct RunResult futureLatency(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + FDBFuture *f = fdb_transaction_get_read_version(tr); + e = waitError(f); + fdb_future_destroy(f); + maybeLogError(e, "getting initial read version", rs); + if(e) return RES(0, e); + + double start = getTime(); + int i; + for(i = 0; i < FUTURE_LATENCY_COUNT; i++) { + FDBFuture *f = fdb_transaction_get_read_version(tr); + e = waitError(f); + fdb_future_destroy(f); + maybeLogError(e, "getting read version", rs); + if(e) return RES(0, e); + } + double end = getTime(); + + return RES(FUTURE_LATENCY_COUNT/(end - start), 0); +} + +uint32_t CLEAR_COUNT = 100000; +const char *CLEAR_KPI = "C clear throughput (local client)"; +struct RunResult clear(struct ResultSet *rs, FDBTransaction *tr) { + double start = getTime(); + int i; + for(i = 0; i < CLEAR_COUNT; i++) { + int k = ((uint64_t)rand()) % numKeys; + fdb_transaction_clear(tr, keys[k], keySize); + } + double end = getTime(); + + fdb_transaction_reset(tr); // Don't actually clear things. + return RES(CLEAR_COUNT/(end - start), 0); +} + +uint32_t CLEAR_RANGE_COUNT = 100000; +const char *CLEAR_RANGE_KPI = "C clear range throughput (local client)"; +struct RunResult clearRange(struct ResultSet *rs, FDBTransaction *tr) { + double start = getTime(); + int i; + for(i = 0; i < CLEAR_RANGE_COUNT; i++) { + int k = ((uint64_t)rand()) % (numKeys - 1); + fdb_transaction_clear_range(tr, keys[k], keySize, keys[k+1], keySize); + } + double end = getTime(); + + fdb_transaction_reset(tr); // Don't actually clear things. + return RES(CLEAR_RANGE_COUNT/(end - start), 0); +} + +uint32_t SET_COUNT = 100000; +const char *SET_KPI = "C set throughput (local client)"; +struct RunResult set(struct ResultSet *rs, FDBTransaction *tr) { + double start = getTime(); + int i; + for(i = 0; i < SET_COUNT; i++) { + int k = ((uint64_t)rand()) % numKeys; + fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize); + } + double end = getTime(); + + fdb_transaction_reset(tr); // Don't actually set things. 
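	// RES(x, e) comes from test.h and simply packs a value and an fdb_error_t
	// into a struct RunResult (#define RES(x, y) (struct RunResult) { x, y }),
	// so the expression below reports the measured throughput in keys per
	// second together with a zero (success) error code.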
+ return RES(SET_COUNT/(end - start), 0); +} + +uint32_t PARALLEL_GET_COUNT = 10000; +const char *PARALLEL_GET_KPI = "C parallel get throughput (local client)"; +struct RunResult parallelGet(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + FDBFuture **futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * PARALLEL_GET_COUNT); + + double start = getTime(); + + int i; + for(i = 0; i < PARALLEL_GET_COUNT; i++) { + int k = ((uint64_t)rand()) % numKeys; + futures[i] = fdb_transaction_get(tr, keys[k], keySize, 0); + } + + fdb_bool_t present; + uint8_t const *outValue; + int outValueLength; + + for(i = 0; i < PARALLEL_GET_COUNT; i++) { + e = maybeLogError(fdb_future_block_until_ready(futures[i]), "waiting for get future", rs); + if(e) { + fdb_future_destroy(futures[i]); + return RES(0, e); + } + + e = maybeLogError(fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs); + if(e) { + fdb_future_destroy(futures[i]); + return RES(0, e); + } + + fdb_future_destroy(futures[i]); + } + + double end = getTime(); + + free(futures); + return RES(PARALLEL_GET_COUNT/(end - start), 0); +} + +uint32_t ALTERNATING_GET_SET_COUNT = 2000; +const char *ALTERNATING_GET_SET_KPI = "C alternating get set throughput (local client)"; +struct RunResult alternatingGetSet(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + FDBFuture **futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * ALTERNATING_GET_SET_COUNT); + + double start = getTime(); + + int i; + for(i = 0; i < ALTERNATING_GET_SET_COUNT; i++) { + int k = ((uint64_t)rand()) % numKeys; + fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize); + futures[i] = fdb_transaction_get(tr, keys[k], keySize, 0); + } + + fdb_bool_t present; + uint8_t const *outValue; + int outValueLength; + + for(i = 0; i < ALTERNATING_GET_SET_COUNT; i++) { + e = maybeLogError(fdb_future_block_until_ready(futures[i]), "waiting for get future", rs); + if(e) { + fdb_future_destroy(futures[i]); + return RES(0, e); + } + + e = maybeLogError(fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs); + if(e) { + fdb_future_destroy(futures[i]); + return RES(0, e); + } + + fdb_future_destroy(futures[i]); + } + + double end = getTime(); + + free(futures); + return RES(ALTERNATING_GET_SET_COUNT/(end - start), 0); +} + +uint32_t SERIAL_GET_COUNT = 2000; +const char *SERIAL_GET_KPI = "C serial get throughput (local client)"; +struct RunResult serialGet(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + int i; + uint32_t *keyIndices = (uint32_t*)malloc((sizeof(uint32_t)) * SERIAL_GET_COUNT); + + if(SERIAL_GET_COUNT > numKeys/2) { + for(i = 0; i < SERIAL_GET_COUNT; i++) { + keyIndices[i] = ((uint64_t)rand()) % numKeys; + } + } else { + for(i = 0; i < SERIAL_GET_COUNT; i++) { + while(1) { + // Yes, this is a linear scan. This happens outside + // the part we are measuring. 
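				// Rejection sampling: draw a random index and accept it only if it
				// has not been chosen already, so the keys read in the timed loop
				// below are distinct.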
+ uint32_t index = ((uint64_t)rand()) % numKeys; + int j; + fdb_bool_t found = 0; + for(j = 0; j < i; j++) { + if(keyIndices[j] == index) { + found = 1; + break; + } + } + + if(!found) { + keyIndices[i] = index; + break; + } + } + } + } + + double start = getTime(); + + fdb_bool_t present; + uint8_t const *outValue; + int outValueLength; + + for(i = 0; i < SERIAL_GET_COUNT; i++) { + FDBFuture *f = fdb_transaction_get(tr, keys[keyIndices[i]], keySize, 0); + fdb_error_t e = maybeLogError(fdb_future_block_until_ready(f), "getting key in serial", rs); + if(e) { + free(keyIndices); + fdb_future_destroy(f); + return RES(0, e); + } + + e = maybeLogError(fdb_future_get_value(f, &present, &outValue, &outValueLength), "getting future value", rs); + fdb_future_destroy(f); + if(e) { + free(keyIndices); + return RES(0, e); + } + } + + double end = getTime(); + + free(keyIndices); + return RES(SERIAL_GET_COUNT/(end - start), 0); +} + +uint32_t GET_RANGE_COUNT = 100000; +const char *GET_RANGE_KPI = "C get range throughput (local client)"; +struct RunResult getRange(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + uint32_t startKey = ((uint64_t)rand()) % (numKeys - GET_RANGE_COUNT - 1); + + double start = getTime(); + + const FDBKeyValue *outKv; + int outCount; + fdb_bool_t outMore = 1; + int totalOut = 0; + int iteration = 0; + + FDBFuture *f = fdb_transaction_get_range(tr, + keys[startKey], keySize, 1, 0, + keys[startKey + GET_RANGE_COUNT], keySize, 1, 0, + 0, 0, + FDB_STREAMING_MODE_WANT_ALL, ++iteration, 0, 0); + + while(outMore) { + e = maybeLogError(fdb_future_block_until_ready(f), "getting range", rs); + if(e) { + fdb_future_destroy(f); + return RES(0, e); + } + + e = maybeLogError(fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading range array", rs); + if(e) { + fdb_future_destroy(f); + return RES(0, e); + } + + totalOut += outCount; + + if(outMore) { + FDBFuture *f2 = fdb_transaction_get_range(tr, + outKv[outCount - 1].key, outKv[outCount - 1].key_length, 1, 1, + keys[startKey + GET_RANGE_COUNT], keySize, 1, 0, + 0, 0, + FDB_STREAMING_MODE_WANT_ALL, ++iteration, 0, 0); + fdb_future_destroy(f); + f = f2; + } + } + + if(totalOut != GET_RANGE_COUNT) { + char *msg = (char*)malloc((sizeof(char)) * 200); + sprintf(msg, "verifying out count (%d != %d)", totalOut, GET_RANGE_COUNT); + logError(4100, msg, rs); + free(msg); + fdb_future_destroy(f); + return RES(0, 4100); + } + if(outMore) { + logError(4100, "verifying no more in range", rs); + fdb_future_destroy(f); + return RES(0, 4100); + } + fdb_future_destroy(f); + + double end = getTime(); + + return RES(GET_RANGE_COUNT/(end - start), 0); +} + +uint32_t GET_KEY_COUNT = 2000; +const char *GET_KEY_KPI = "C get key throughput (local client)"; +struct RunResult getKey(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + double start = getTime(); + + fdb_bool_t present; + uint8_t const *outValue; + int outValueLength; + + int i; + for(i = 0; i < GET_KEY_COUNT; i++) { + int key = ((uint64_t)rand()) % numKeys; + int offset = (((uint64_t)rand()) % 21) - 10; + FDBFuture *f = fdb_transaction_get_key(tr, keys[key], keySize, 1, offset, 0); + + e = maybeLogError(fdb_future_block_until_ready(f), "waiting for get key", rs); + if(e) { + fdb_future_destroy(f); + return RES(0, e); + } + + e = maybeLogError(fdb_future_get_value(f, 
&present, &outValue, &outValueLength), "getting future value", rs); + fdb_future_destroy(f); + if(e) { + return RES(0, e); + } + } + + double end = getTime(); + + return RES(GET_KEY_COUNT/(end - start), 0); +} + +uint32_t GET_SINGLE_KEY_RANGE_COUNT = 2000; +const char *GET_SINGLE_KEY_RANGE_KPI = "C get_single_key_range throughput (local client)"; +struct RunResult getSingleKeyRange(struct ResultSet *rs, FDBTransaction *tr) { + fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs); + if(e) return RES(0, e); + + double start = getTime(); + + const FDBKeyValue *outKv; + int outCount; + fdb_bool_t outMore; + + int i; + for(i = 0; i < GET_SINGLE_KEY_RANGE_COUNT; i++) { + int key = ((uint64_t)rand()) % (numKeys - 1); + FDBFuture *f = fdb_transaction_get_range(tr, + keys[key], keySize, 1, 0, + keys[key + 1], keySize, 1, 0, + 0, 0, + FDB_STREAMING_MODE_WANT_ALL, 1, 0, 0); + + e = maybeLogError(fdb_future_block_until_ready(f), "waiting for single key range", rs); + if(e) { + fdb_future_destroy(f); + return RES(0, e); + } + + e = maybeLogError(fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading single key range array", rs); + if(e) { + fdb_future_destroy(f); + return RES(0, e); + } + + if(outCount != 1) { + logError(4100, "non-1 number of keys returned in single key range read", rs); + fdb_future_destroy(f); + return RES(0, 4100); + } + if(outMore) { + logError(4100, "more keys to read in single key range read", rs); + fdb_future_destroy(f); + return RES(0, 4100); + } + + fdb_future_destroy(f); + } + + double end = getTime(); + + return RES(GET_SINGLE_KEY_RANGE_COUNT/(end - start), 0); +} + +struct RunResult singleKey(struct ResultSet *rs, FDBTransaction *tr) { + int k = ((uint64_t)rand()) % numKeys; + fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize); + return RES(0, 0); +} + +uint32_t WRITE_TRANSACTION_COUNT = 1000; +const char *WRITE_TRANSACTION_KPI = "C write_transaction throughput (local client)"; +struct RunResult writeTransaction(struct ResultSet *rs, FDBDatabase *db) { + double start = getTime(); + + int i; + for(i = 0; i < WRITE_TRANSACTION_COUNT; i++) { + struct RunResult res = run(rs, db, &singleKey); + if(res.e) return res; + } + + double end = getTime(); + + return RES(WRITE_TRANSACTION_COUNT/(end - start), 0); +} + +void runTests(struct ResultSet *rs) { + FDBDatabase *db = openDatabase(rs, &netThread); + + printf("Loading database...\n"); + insertData(rs, db); + + printf("future_latency\n"); + runTest(&futureLatency, db, rs, FUTURE_LATENCY_KPI); + + printf("clear\n"); + runTest(&clear, db, rs, CLEAR_KPI); + + printf("clear_range\n"); + runTest(&clearRange, db, rs, CLEAR_RANGE_KPI); + + printf("set\n"); + runTest(&set, db, rs, SET_KPI); + + printf("parallel_get\n"); + runTest(¶llelGet, db, rs, PARALLEL_GET_KPI); + + printf("alternating_get_set\n"); + runTest(&alternatingGetSet, db, rs, ALTERNATING_GET_SET_KPI); + + printf("serial_get\n"); + runTest(&serialGet, db, rs, SERIAL_GET_KPI); + + printf("get_range\n"); + runTest(&getRange, db, rs, GET_RANGE_KPI); + + printf("get_key\n"); + runTest(&getKey, db, rs, GET_KEY_KPI); + + printf("get_single_key_range\n"); + runTest(&getSingleKeyRange, db, rs, GET_SINGLE_KEY_RANGE_KPI); + + printf("write_transaction\n"); + runTestDb(&writeTransaction, db, rs, WRITE_TRANSACTION_KPI); + + fdb_database_destroy(db); + fdb_stop_network(); +} + +int main(int argc, char **argv) { + srand(time(NULL)); + struct ResultSet *rs = newResultSet(); + checkError(fdb_select_api_version(500), "select API 
version", rs); + printf("Running performance test at client version: %s\n", fdb_get_client_version()); + + valueStr = (uint8_t*)malloc((sizeof(uint8_t))*valueSize); + int i; + for(i = 0; i < valueSize; i++) { + valueStr[i] = (uint8_t)'x'; + } + + keys = generateKeys(numKeys, keySize); + runTests(rs); + writeResultSet(rs); + + free(valueStr); + freeResultSet(rs); + freeKeys(keys, numKeys); + + return 0; +} diff --git a/bindings/c/test/ryw_benchmark.c b/bindings/c/test/ryw_benchmark.c new file mode 100644 index 0000000000..1a780bf1b3 --- /dev/null +++ b/bindings/c/test/ryw_benchmark.c @@ -0,0 +1,257 @@ +/* + * ryw_benchmark.c + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "test.h" +#include +#include + +#include +#include +#include +#include +#include + +pthread_t netThread; + +int numKeys = 10000; +int keySize = 16; +uint8_t** keys; + +void insertData(FDBTransaction *tr) { + fdb_transaction_clear_range(tr, (uint8_t*)"", 0, (uint8_t*)"\xff", 1); + + uint8_t *v = (uint8_t*)"foo"; + uint32_t i; + for(i = 0; i <= numKeys; ++i) { + fdb_transaction_set(tr, keys[i], keySize, v, 3); + } +} + +int runTest(int (*testFxn)(FDBTransaction*, struct ResultSet*), FDBTransaction *tr, struct ResultSet *rs, const char *kpiName) { + int numRuns = 25; + int *results = malloc(sizeof(int)*numRuns); + int i = 0; + for(; i < numRuns; ++i) { + results[i] = testFxn(tr, rs); + if(results[i] < 0) { + free(results); + return -1; + } + } + + int result = median(results, numRuns); + free(results); + + addKpi(rs, kpiName, result, "keys/s"); + + return result; +} + +int getSingle(FDBTransaction *tr, struct ResultSet *rs) { + int present; + uint8_t const *value; + int length; + int i; + + double start = getTime(); + for(i = 0; i < numKeys; ++i) { + FDBFuture *f = fdb_transaction_get(tr, keys[5001], keySize, 0); + if(getError(fdb_future_block_until_ready(f), "GetSingle (block for get)", rs)) return -1; + if(getError(fdb_future_get_value(f, &present, &value, &length), "GetSingle (get result)", rs)) return -1; + fdb_future_destroy(f); + } + double end = getTime(); + + return numKeys / (end - start); +} + +int getManySequential(FDBTransaction *tr, struct ResultSet *rs) { + int present; + uint8_t const *value; + int length; + int i; + + double start = getTime(); + for(i = 0; i < numKeys; ++i) { + FDBFuture *f = fdb_transaction_get(tr, keys[i], keySize, 0); + if(getError(fdb_future_block_until_ready(f), "GetManySequential (block for get)", rs)) return -1; + if(getError(fdb_future_get_value(f, &present, &value, &length), "GetManySequential (get result)", rs)) return -1; + fdb_future_destroy(f); + } + double end = getTime(); + + return numKeys / (end - start); +} + +int getRangeBasic(FDBTransaction *tr, struct ResultSet *rs) { + int count; + const FDBKeyValue *kvs; + int more; + int i; + + double start = getTime(); + for(i = 0; i < 100; ++i) { + FDBFuture *f = 
fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0); + + if(getError(fdb_future_block_until_ready(f), "GetRangeBasic (block for get range)", rs)) return -1; + if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "GetRangeBasic (get range results)", rs)) return -1; + + if(count != numKeys) { + fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys); + addError(rs, "GetRangeBasic bad count"); + return -1; + } + } + double end = getTime(); + + return 100 * numKeys / (end - start); +} + +int singleClearGetRange(FDBTransaction *tr, struct ResultSet *rs) { + int count; + const FDBKeyValue *kvs; + int more; + int i; + + for(i = 0; i < numKeys; i+=2) { + fdb_transaction_clear(tr, keys[i], keySize); + } + + double start = getTime(); + for(i = 0; i < 100; ++i) { + FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0); + + if(getError(fdb_future_block_until_ready(f), "SingleClearGetRange (block for get range)", rs)) return -1; + if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "SingleClearGetRange (get range results)", rs)) return -1; + + fdb_future_destroy(f); + + if(count != numKeys/2) { + fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys); + addError(rs, "SingleClearGetRange bad count"); + return -1; + } + } + double end = getTime(); + + insertData(tr); + return 100 * numKeys / 2 / (end - start); +} + +int clearRangeGetRange(FDBTransaction *tr, struct ResultSet *rs) { + int count; + const FDBKeyValue *kvs; + int more; + int i; + + for(i = 0; i < numKeys; i+=4) { + fdb_transaction_clear_range(tr, keys[i], keySize, keys[i+1], keySize); + } + + double start = getTime(); + for(i = 0; i < 100; ++i) { + FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0); + + if(getError(fdb_future_block_until_ready(f), "ClearRangeGetRange (block for get range)", rs)) return -1; + if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "ClearRangeGetRange (get range results)", rs)) return -1; + + fdb_future_destroy(f); + + if(count != numKeys*3/4) { + fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys*3/4); + addError(rs, "ClearRangeGetRange bad count"); + return -1; + } + } + double end = getTime(); + + insertData(tr); + return 100 * numKeys * 3 / 4 / (end - start); +} + +int interleavedSetsGets(FDBTransaction *tr, struct ResultSet *rs) { + int present; + uint8_t const *value; + int length; + int i; + + uint8_t *k = (uint8_t*)"foo"; + uint8_t v[10]; + int num = 1; + + double start = getTime(); + sprintf((char*)v, "%d", num); + fdb_transaction_set(tr, k, 3, v, strlen((char*)v)); + + for(i = 0; i < 10000; ++i) { + FDBFuture *f = fdb_transaction_get(tr, k, 3, 0); + if(getError(fdb_future_block_until_ready(f), "InterleavedSetsGets (block for get)", rs)) return -1; + if(getError(fdb_future_get_value(f, &present, &value, &length), "InterleavedSetsGets (get result)", rs)) return -1; + fdb_future_destroy(f); + + sprintf((char*)v, "%d", ++num); + fdb_transaction_set(tr, k, 3, v, strlen((char*)v)); + } + double end = getTime(); + + return 10000 / (end - start); +} + +void runTests(struct ResultSet *rs) { + FDBDatabase *db = openDatabase(rs, &netThread); + + FDBTransaction *tr; + 
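	/*
	 * Everything below runs against this single transaction, following the
	 * usual blocking pattern of the C API: each request hands back an
	 * FDBFuture that is waited on and then destroyed, e.g. (sketch only;
	 * key, keyLength, present, value and valueLength are placeholders):
	 *
	 *   FDBFuture *f = fdb_transaction_get(tr, key, keyLength, 0);
	 *   if(!fdb_future_block_until_ready(f))
	 *       fdb_future_get_value(f, &present, &value, &valueLength);
	 *   fdb_future_destroy(f);
	 *
	 * Because the test data is written through this same uncommitted
	 * transaction, the reads in these tests are served from the
	 * read-your-writes cache rather than the cluster.
	 */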
checkError(fdb_database_create_transaction(db, &tr), "create transaction", rs); + + FDBFuture *f = fdb_transaction_get_read_version(tr); + checkError(fdb_future_block_until_ready(f), "block for read version", rs); + + int64_t version; + checkError(fdb_future_get_version(f, &version), "get version", rs); + fdb_future_destroy(f); + + insertData(tr); + + runTest(&getSingle, tr, rs, "C: get single cached value throughput"); + runTest(&getManySequential, tr, rs, "C: get sequential cached values throughput"); + runTest(&getRangeBasic, tr, rs, "C: get range cached values throughput"); + runTest(&singleClearGetRange, tr, rs, "C: get range cached values with clears throughput"); + runTest(&clearRangeGetRange, tr, rs, "C: get range cached values with clear ranges throughput"); + runTest(&interleavedSetsGets, tr, rs, "C: interleaved sets and gets on a single key throughput"); + + fdb_database_destroy(db); + fdb_stop_network(); +} + +int main(int argc, char **argv) { + srand(time(NULL)); + struct ResultSet *rs = newResultSet(); + checkError(fdb_select_api_version(500), "select API version", rs); + printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version()); + + keys = generateKeys(numKeys, keySize); + runTests(rs); + writeResultSet(rs); + freeResultSet(rs); + freeKeys(keys, numKeys); + + return 0; +} + diff --git a/bindings/c/test/test.h b/bindings/c/test/test.h new file mode 100644 index 0000000000..9cb1d9f2ae --- /dev/null +++ b/bindings/c/test/test.h @@ -0,0 +1,257 @@ +/* + * test.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include +#include +#include + +#ifndef FDB_API_VERSION +#define FDB_API_VERSION 500 +#endif + +#include +#include + +double getTime() { + static struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_usec/1000000.0 + tv.tv_sec; +} + +void writeKey(uint8_t **dest, int key, int keySize) { + *dest = (uint8_t*)malloc((sizeof(uint8_t))*keySize); + sprintf((char*)*dest, "%0*d", keySize, key); +} + +uint8_t **generateKeys(int numKeys, int keySize) { + uint8_t **keys = (uint8_t**)malloc(sizeof(uint8_t*)*(numKeys+1)); + + uint32_t i; + for(i = 0; i <= numKeys; ++i) { + writeKey(keys + i, i, keySize); + } + + return keys; +} +void freeKeys(uint8_t **keys, int numKeys) { + uint32_t i; + for(i = 0; i < numKeys; i++) { + free(keys[i]); + } + free(keys); +} + +int cmpfunc(const void* a, const void* b) { + return (*(int*)a - *(int*)b); +} + +int median(int *values, int length) { + qsort(values, length, sizeof(int), cmpfunc); + return values[length/2]; +} + +struct RunResult { + int res; + fdb_error_t e; +}; +#define RES(x, y) (struct RunResult) { x, y } + +struct Kpi { + const char *name; + int value; + const char *units; + + struct Kpi *next; +}; + +struct Error { + char *message; + + struct Error *next; +}; + +struct ResultSet { + struct Kpi *kpis; + struct Error *errors; +}; + +struct ResultSet* newResultSet() { + struct ResultSet *rs = malloc(sizeof(struct ResultSet)); + + rs->kpis = NULL; + rs->errors = NULL; + + return rs; +} + +void addKpi(struct ResultSet *rs, const char *name, int value, const char *units) { + struct Kpi *k = malloc(sizeof(struct Kpi)); + k->name = name; + k->value = value; + k->units = units; + k->next = rs->kpis; + rs->kpis = k; +} + +void addError(struct ResultSet *rs, const char *message) { + struct Error *e = malloc(sizeof(struct Error)); + e->message = (char*)malloc(strlen(message)+1); + strcpy(e->message, message); + e->next = rs->errors; + rs->errors = e; +} + +void writeResultSet(struct ResultSet *rs) { + uint64_t id = ((uint64_t)rand() << 32) + rand(); + char name[100]; + sprintf(name, "fdb-c_result-%llu.json", id); + FILE *fp = fopen(name, "w"); + if(!fp) { + fprintf(stderr, "Could not open results file %s\n", name); + exit(1); + } + + fprintf(fp, "{\n"); + fprintf(fp, "\t\"kpis\": {\n"); + + struct Kpi *k = rs->kpis; + while(k != NULL) { + fprintf(fp, "\t\t\"%s\": { \"units\": \"%s\", \"value\": %d }", k->name, k->units, k->value); + if(k->next != NULL) { + fprintf(fp, ","); + } + fprintf(fp, "\n"); + k = k->next; + } + + fprintf(fp, "\t},\n"); + fprintf(fp, "\t\"errors\": [\n"); + + struct Error *e = rs->errors; + while(e != NULL) { + fprintf(fp, "\t\t\"%s\"", e->message); + if(e->next != NULL) { + fprintf(fp, ","); + } + fprintf(fp, "\n"); + e = e->next; + } + + fprintf(fp, "\t]\n"); + fprintf(fp, "}\n"); + + fclose(fp); +} + +void freeResultSet(struct ResultSet *rs) { + struct Kpi *k = rs->kpis; + while(k != NULL) { + struct Kpi *next = k->next; + free(k); + k = next; + } + + struct Error *e = rs->errors; + while(e != NULL) { + struct Error *next = e->next; + free(e->message); + free(e); + e = next; + } + + free(rs); +} + +fdb_error_t getError(fdb_error_t err, const char* context, struct ResultSet *rs) { + if(err) { + char *msg = (char*)malloc(strlen(context) + 100); + sprintf(msg, "Error in %s: %s", context, fdb_get_error(err)); + fprintf(stderr, "%s\n", msg); + if(rs != NULL) { + addError(rs, msg); + } + + free(msg); + } + + return err; +} + +void checkError(fdb_error_t err, const char* context, struct ResultSet 
*rs) { + if(getError(err, context, rs)) { + if(rs != NULL) { + writeResultSet(rs); + freeResultSet(rs); + } + exit(1); + } +} + +fdb_error_t logError(fdb_error_t err, const char* context, struct ResultSet *rs) { + char *msg = (char*)malloc(strlen(context) + 100); + sprintf(msg, "Error in %s: %s", context, fdb_get_error(err)); + fprintf(stderr, "%s\n", msg); + if(rs != NULL) { + addError(rs, msg); + } + + free(msg); + return err; +} + +fdb_error_t maybeLogError(fdb_error_t err, const char* context, struct ResultSet *rs) { + if(err && !fdb_error_predicate( FDB_ERROR_PREDICATE_RETRYABLE, err ) ) { + return logError(err, context, rs); + } + return err; +} + +void* runNetwork() { + checkError(fdb_run_network(), "run network", NULL); + return NULL; +} + +FDBDatabase* openDatabase(struct ResultSet *rs, pthread_t *netThread) { + checkError(fdb_setup_network(), "setup network", rs); + pthread_create(netThread, NULL, &runNetwork, NULL); + + FDBFuture *f = fdb_create_cluster(NULL); + checkError(fdb_future_block_until_ready(f), "block for cluster", rs); + + FDBCluster *cluster; + checkError(fdb_future_get_cluster(f, &cluster), "get cluster", rs); + + fdb_future_destroy(f); + + f = fdb_cluster_create_database(cluster, (uint8_t*)"DB", 2); + checkError(fdb_future_block_until_ready(f), "block for database", rs); + + FDBDatabase *db; + checkError(fdb_future_get_database(f, &db), "get database", rs); + + fdb_future_destroy(f); + fdb_cluster_destroy(cluster); + + return db; +} diff --git a/bindings/flow/local.mk b/bindings/flow/local.mk index 95814f970e..1f2260e4c1 100644 --- a/bindings/flow/local.mk +++ b/bindings/flow/local.mk @@ -22,5 +22,23 @@ fdb_flow_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS) fdb_flow_LDFLAGS := -Llib -lfdb_c $(fdbrpc_LDFLAGS) -fdb_flow_LIBS := lib/libfdbrpc.a +fdb_flow_LIBS := +packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz: fdb_flow + @echo "Packaging fdb_flow" + @rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH) + @mkdir -p packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb + @cp lib/libfdb_flow.a packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib + @find bindings/flow -name '*.h' -not -name 'bindings/flow/tester/*' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow \; + @find bindings/c/foundationdb -name '*.h' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb \; + @tar czf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz -C packages fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH) + @rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH) + +FDB_FLOW: packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz + +FDB_FLOW_clean: + @echo "Cleaning fdb_flow package" + @rm -rf packages/fdb-flow-*.tar.gz + +packages: FDB_FLOW +packages_clean: FDB_FLOW_clean diff --git a/bindings/flow/tester/local.mk b/bindings/flow/tester/local.mk index 570ad9c0bf..695e98bea9 100644 --- a/bindings/flow/tester/local.mk +++ b/bindings/flow/tester/local.mk @@ -22,7 +22,7 @@ fdb_flow_tester_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS) fdb_flow_tester_LDFLAGS := -Llib $(fdbrpc_LDFLAGS) -lfdb_c -fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libfdbrpc.a lib/libflow.a lib/libfdb_c.$(DLEXT) +fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libflow.a lib/libfdb_c.$(DLEXT) fdb_flow_tester: lib/libfdb_c.$(DLEXT) @mkdir -p bindings/flow/bin diff --git a/bindings/go/Gopkg.lock 
b/bindings/go/Gopkg.lock new file mode 100644 index 0000000000..bef2d0092e --- /dev/null +++ b/bindings/go/Gopkg.lock @@ -0,0 +1,9 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ab4fef131ee828e96ba67d31a7d690bd5f2f42040c6766b1b12fe856f87e0ff7" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/bindings/go/Gopkg.toml b/bindings/go/Gopkg.toml new file mode 100644 index 0000000000..525774ebad --- /dev/null +++ b/bindings/go/Gopkg.toml @@ -0,0 +1,2 @@ +# The FoundationDB go bindings currently have no external golang dependencies outside of +# the go standard library. diff --git a/bindings/go/README.md b/bindings/go/README.md index 89f93976b2..13e8f07940 100644 --- a/bindings/go/README.md +++ b/bindings/go/README.md @@ -10,10 +10,20 @@ This package requires: Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-500. -To install this package, in the top level of this repository run: +To build this package, in the top level of this repository run: make fdb_go +This will create binary packages for the appropriate platform within the "build" subdirectory of this folder. + +To install this package, you can run the "fdb-go-install.sh" script: + + ./fdb-go-install.sh install + +The "install" command of this script does not depend on the presence of the repo in general and will download the repository into +your local go path. Running "localinstall" instead of "install" will use the local copy here (with a symlink) instead +of downloading from the remote repository. + Documentation ------------- diff --git a/bindings/go/fdb-go-install.sh b/bindings/go/fdb-go-install.sh new file mode 100755 index 0000000000..c8122b94cf --- /dev/null +++ b/bindings/go/fdb-go-install.sh @@ -0,0 +1,304 @@ +#!/bin/bash -eu +# +# fdb-go-install.sh +# +# Installs the FoundationDB Go bindings for a client. This will download +# the repository from the remote repo either into the go directory +# with the appropriate semantic version. It will then build a few +# generated files that need to be present for the go build to work. +# At the end, it has some advice for flags to modify within your +# go environment so that other packages may successfully use this +# library. +# + +DESTDIR="${DESTDIR:-}" +FDBVER="${FDBVER:-5.0.1}" +REMOTE="${REMOTE:-github.com}" +FDBREPO="${FDBREPO:-apple/foundationdb}" + +status=0 + +platform=$(uname) +if [[ "${platform}" == "Darwin" ]] ; then + FDBLIBDIR="${FDBLIBDIR:-/usr/local/lib}" + libfdbc="libfdb_c.dylib" +elif [[ "${platform}" == "Linux" ]] ; then + FDBLIBDIR="${FDBLIBDIR:-/usr/lib}" + libfdbc="libfdb_c.so" +else + echo "Unsupported platform ${platform}". + echo "At the moment, only macOS and Linux are supported by this script." + let status="${status} + 1" +fi + +filedir=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd) +destdir="" + +function printUsage() { + echo "Usage: fdb-go-install.sh " + echo + echo "cmd: One of the commands to run. 
The options are:" + echo " install Download the FDB go bindings and install them" + echo " localinstall Install a into the go path a local copy of the repo" + echo " download Download but do not prepare the FoundationDB bindings" + echo " help Print this help message and then quit" + echo + echo "Command Line Options:" + echo " --fdbver FoundationDB semantic version (default is ${FDBVER})" + echo " -d/--dest-dir Local location for the repo (default is to place in go path)" + echo + echo "Environment Variable Options:" + echo " REMOTE Remote repository to download from (currently ${REMOTE})" + echo " FDBREPO Repository of FoundationDB library to download (currently ${FDBREPO})" + echo " FDBLIBDIR Directory within which should be the FoundationDB c library (currently ${FDBLIBDIR})" +} + +function parseArgs() { + local status=0 + + if [[ "${#}" -lt 0 ]] ; then + printUsage + let status="${status} + 1" + else + operation="${1}" + shift + if [[ "${operation}" != "install" ]] && [[ "${operation}" != "localinstall" ]] && [[ "${operation}" != "download" ]] && [[ "${operation}" != "help" ]] ; then + echo "Unknown command: ${operation}" + printUsage + let status="${status} + 1" + fi + fi + + while [[ "${#}" -gt 0 ]] && [[ "${status}" -eq 0 ]] ; do + local key="${1}" + case "${key}" in + --fdbver) + if [[ "${#}" -lt 2 ]] ; then + echo "No version specified with --fdbver flag" + printUsage + let status="${status} + 1" + else + FDBVER="${2}" + fi + shift + ;; + + -d|--dest-dir) + if [[ "${#}" -lt 2 ]] ; then + echo "No destination specified with ${key} flag" + printUsage + let status="${status} + 1" + else + destdir="${2}" + fi + shift + ;; + + *) + echo "Unrecognized argument ${key}" + printUsage + let status="${status} + 1" + esac + shift + done + + return "${status}" +} + +function checkBin() { + if [[ "${#}" -lt 1 ]] ; then + echo "Usage: checkBin " + return 1 + else + if [[ -n $(which "${1}") ]] ; then + return 0 + else + return 1 + fi + fi +} + +if [[ "${status}" -gt 0 ]] ; then + # We have already failed. + : +elif [[ "${#}" -lt 1 ]] ; then + printUsage +else + required_bins=( 'go' 'git' 'make' 'mono' ) + + missing_bins=() + for bin in "${required_bins[@]}" ; do + if ! checkBin "${bin}" ; then + missing_bins+=("${bin}") + let status="${status} + 1" + fi + done + + if [[ "${status}" -gt 0 ]] ; then + echo "Missing binaries: ${missing_bins[*]}" + elif ! parseArgs ${@} ; then + let status="${status} + 1" + elif [[ "${operation}" == "help" ]] ; then + printUsage + else + # Add go-specific environment variables. + eval $(go env) + + golibdir=$(dirname "${GOPATH}/src/${REMOTE}/${FDBREPO}") + if [[ -z "${destdir}" ]] ; then + if [[ "${operation}" == "localinstall" ]] ; then + # Assume its the local directory. + destdir=$(cd "${filedir}/../../.." && pwd) + else + destdir="${golibdir}" + fi + fi + + if [[ ! -d "${destdir}" ]] ; then + cmd=( 'mkdir' '-p' "${destdir}" ) + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not create destination directory ${destdir}." + fi + fi + + # Step 1: Make sure repository is present. + + if [[ "${status}" -eq 0 ]] ; then + destdir=$( cd "${destdir}" && pwd ) # Get absolute path of destination dir. + fdbdir="${destdir}/foundation" + + if [[ ! -d "${destdir}" ]] ; then + cmd=("mkdir" "-p" "${destdir}") + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + echo "Could not create destination directory ${destdir}." 
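            # Failures are recorded by bumping ${status}; any non-zero value
            # causes the remaining steps to be skipped and becomes the
            # script's exit code.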
+ let status="${status} + 1" + fi + fi + fi + + if [[ "${operation}" == "localinstall" ]] ; then + # No download occurs in this case. + : + else + if [[ -d "${fdbdir}" ]] ; then + echo "Directory ${fdbdir} already exists ; checking out appropriate tag" + cmd1=( 'git' '-C' "${fdbdir}" 'fetch' 'origin' ) + cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "release-${FDBVER}" ) + + if ! echo "${cmd1[*]}" || ! "${cmd1[@]}" ; then + let status="${status} + 1" + echo "Could not pull latest changes from origin" + elif ! echo "${cmd2[*]}" || ! "${cmd2[@]}" ; then + let status="${status} + 1" + echo "Could not checkout tag release-${FDBVER}." + fi + else + echo "Downloading foundation repository into ${destdir}:" + cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "release-${FDBVER}" "git@${REMOTE}:${FDBREPO}.git" ) + + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not download repository." + fi + fi + fi + + # Step 2: Build generated things. + + if [[ "${operation}" == "download" ]] ; then + # The generated files are not created under a strict download. + : + elif [[ "${status}" -eq 0 ]] ; then + echo "Building generated files." + cmd=( 'make' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' ) + + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not generate required c header" + else + infile="${fdbdir}/fdbclient/vexillographer/fdb.options" + outfile="${fdbdir}/bindings/go/src/fdb/generated.go" + cmd=( 'go' 'run' "${fdbdir}/bindings/go/src/_util/translate_fdb_options.go" ) + echo "${cmd[*]} < ${infile} > ${outfile}" + if ! "${cmd[@]}" < "${infile}" > "${outfile}" ; then + let status="${status} + 1" + echo "Could not generate generated go file." + fi + fi + fi + + # Step 3: Add to go path. + + if [[ "${operation}" == "download" ]] ; then + # The files are not moved under a strict download. + : + elif [[ "${status}" -eq 0 ]] ; then + linkpath="${GOPATH}/src/${REMOTE}/${FDBREPO}" + if [[ "${linkpath}" == "${fdbdir}" ]] ; then + # Downloaded directly into go path. Skip making the link. + : + elif [[ -e "${linkpath}" ]] ; then + echo "Warning: link path (${linkpath}) already exists. Leaving in place." + else + dirpath=$(dirname "${linkpath}") + if [[ ! -d "${dirpath}" ]] ; then + cmd=( 'mkdir' '-p' "${dirpath}" ) + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not create directory for link." + fi + fi + + if [[ "${status}" -eq 0 ]] ; then + cmd=( 'ln' '-s' "${fdbdir}" "${linkpath}" ) + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not create link within go path." + fi + fi + fi + fi + + # Step 4: Build the binaries. + + if [[ "${operation}" == "download" ]] ; then + # Do not install if only downloading + : + elif [[ "${status}" -eq 0 ]] ; then + cgo_cflags="-g -O2 -I${linkpath}/bindings/c" + cgo_ldflags="-g -O2 -L${FDBLIBDIR}" + fdb_go_path="${REMOTE}/${FDBREPO}/bindings/go/src" + + if [[ ! -e "${FDBLIBDIR}/${libfdbc}" ]] ; then + # Just a warning. Don't fail script. + echo + echo "WARNING: The FoundationDB C library was not found within ${FDBLIBDIR}." + echo "Your installation may be incomplete." + echo + elif ! CGO_CFLAGS="${cgo_cflags}" CGO_LDFLAGS="${cgo_ldflags}" go install "${fdb_go_path}/fdb" "${fdb_go_path}/fdb/tuple" "${fdb_go_path}/fdb/subspace" "${fdb_go_path}/fdb/directory" ; then + let status="${status} + 1" + echo "Could not build FoundationDB go libraries." + fi + fi + + # Step 5: Explain CGO flags. 
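    # The flags echoed below are what a dependent package needs at build time.
    # As an illustrative sketch (exact paths depend on GOPATH and FDBLIBDIR),
    # a consumer would build with something like:
    #
    #   CGO_CFLAGS="-g -O2 -I${GOPATH}/src/github.com/apple/foundationdb/bindings/c" \
    #   CGO_LDFLAGS="-g -O2 -L/usr/local/lib" \
    #   go build my/package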
+ + if [[ "${status}" -eq 0 && ("${operation}" == "localinstall" || "${operation}" == "install" ) ]] ; then + echo + echo "The FoundationDB go bindings were successfully installed." + echo "To build packages which use the go bindings, you will need to" + echo "set the following environment variables:" + echo " CGO_CFLAGS=\"${cgo_cflags}\"" + echo " CGO_LDFLAGS=\"${cgo_ldflags}\"" + fi + fi +fi + +exit "${status}" diff --git a/bindings/go/include.mk b/bindings/go/include.mk index 4a5ea38433..bfaa2a0334 100644 --- a/bindings/go/include.mk +++ b/bindings/go/include.mk @@ -21,7 +21,11 @@ TARGETS += fdb_go fdb_go_tester CLEAN_TARGETS += fdb_go_clean fdb_go_tester_clean -GOPATH := $(CURDIR)/bindings/go +GOPATH := $(CURDIR)/bindings/go/build +GO_IMPORT_PATH := github.com/apple/foundationdb/bindings/go/src +GO_DEST := $(GOPATH)/src/$(GO_IMPORT_PATH) + +.PHONY: fdb_go fdb_go_path fdb_go_tester fdb_go_tester_clean godoc godoc_clean # We only override if the environment didn't set it (this is used by # the fdbwebsite documentation build process) @@ -38,18 +42,23 @@ else $(error Not prepared to compile on platform $(PLATFORM)) endif -GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM) +GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM)/$(GO_IMPORT_PATH) GO_PACKAGES := fdb fdb/tuple fdb/subspace fdb/directory GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a)) -GO_SRC := $(shell find $(GOPATH)/src -name '*.go') +GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go') fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC) +fdb_go_path: $(GO_SRC) + @echo "Creating fdb_go_path" + @mkdir -p $(GO_DEST) + @cp -r bindings/go/src/* $(GO_DEST) + fdb_go_clean: @echo "Cleaning fdb_go" - @rm -rf $(GO_PACKAGE_OUTDIR) + @rm -rf $(GOPATH) fdb_go_tester: $(GOPATH)/bin/_stacktester @@ -57,40 +66,40 @@ fdb_go_tester_clean: @echo "Cleaning fdb_go_tester" @rm -rf $(GOPATH)/bin -$(GOPATH)/bin/_stacktester: $(GO_SRC) $(GO_PACKAGE_OBJECTS) bindings/go/src/fdb/generated.go +$(GOPATH)/bin/_stacktester: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go @echo "Compiling $(basename $(notdir $@))" - @go install _stacktester + @go install $(GO_IMPORT_PATH)/_stacktester -$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a bindings/go/src/fdb/generated.go +$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go @echo "Compiling fdb/tuple" - @go install fdb/tuple + @go install $(GO_IMPORT_PATH)/fdb/tuple -$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a bindings/go/src/fdb/generated.go +$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go @echo "Compiling fdb/subspace" - @go install fdb/subspace + @go install $(GO_IMPORT_PATH)/fdb/subspace -$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a bindings/go/src/fdb/generated.go +$(GO_PACKAGE_OUTDIR)/fdb/directory.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go @echo "Compiling fdb/directory" - @go install fdb/directory + @go install $(GO_IMPORT_PATH)/fdb/directory -$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_SRC) bindings/go/src/fdb/generated.go +$(GO_PACKAGE_OUTDIR)/fdb.a: fdb_go_path $(GO_SRC) $(GO_DEST)/fdb/generated.go @echo "Compiling fdb" - @go 
install fdb + @go install $(GO_IMPORT_PATH)/fdb -bindings/go/src/fdb/generated.go: lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options +$(GO_DEST)/fdb/generated.go: fdb_go_path lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options @echo "Building $@" @go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@ -godoc: $(GO_SRC) +godoc: fdb_go_path $(GO_SRC) @echo "Generating Go Documentation" @rm -rf $(GODOC_DIR)/godoc @mkdir -p $(GODOC_DIR)/godoc @mkdir -p $(GODOC_DIR)/godoc/lib/godoc - @godoc -url "http://localhost:6060/pkg/fdb" > $(GODOC_DIR)/godoc/fdb.html - @godoc -url "http://localhost:6060/pkg/fdb/tuple" > $(GODOC_DIR)/godoc/fdb.tuple.html - @godoc -url "http://localhost:6060/pkg/fdb/subspace" > $(GODOC_DIR)/godoc/fdb.subspace.html - @godoc -url "http://localhost:6060/pkg/fdb/directory" > $(GODOC_DIR)/godoc/fdb.directory.html - @cp $(GOPATH)/godoc-resources/* $(GODOC_DIR)/godoc/lib/godoc + @godoc -url "pkg/$(GO_IMPORT_PATH)/fdb" > $(GODOC_DIR)/godoc/fdb.html + @godoc -url "pkg/$(GO_IMPORT_PATH)/fdb/tuple" > $(GODOC_DIR)/godoc/fdb.tuple.html + @godoc -url "pkg/$(GO_IMPORT_PATH)/fdb/subspace" > $(GODOC_DIR)/godoc/fdb.subspace.html + @godoc -url "pkg/$(GO_IMPORT_PATH)/fdb/directory" > $(GODOC_DIR)/godoc/fdb.directory.html + @cp $(CURDIR)/bindings/go/godoc-resources/* $(GODOC_DIR)/godoc/lib/godoc @echo "Mangling paths in Go Documentation" @(find $(GODOC_DIR)/godoc/ -name *.html -exec sed -i '' -e 's_/lib_lib_' {} \;) @(sed -i -e 's_a href="tuple/"_a href="fdb.tuple.html"_' $(GODOC_DIR)/godoc/fdb.html) diff --git a/bindings/go/src/_stacktester/directory.go b/bindings/go/src/_stacktester/directory.go index ac4ff1217b..fa66205fde 100644 --- a/bindings/go/src/_stacktester/directory.go +++ b/bindings/go/src/_stacktester/directory.go @@ -21,10 +21,10 @@ package main import ( - "fdb" - "fdb/tuple" - "fdb/subspace" - "fdb/directory" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb/directory" "strings" "bytes" ) @@ -94,14 +94,14 @@ func (sm *StackMachine) maybePath() []string { } var createOps = map[string]bool { - "CREATE_SUBSPACE": true, - "CREATE_LAYER": true, - "CREATE_OR_OPEN": true, - "CREATE": true, - "OPEN": true, - "MOVE": true, - "MOVE_TO": true, - "OPEN_SUBSPACE": true, + "CREATE_SUBSPACE": true, + "CREATE_LAYER": true, + "CREATE_OR_OPEN": true, + "CREATE": true, + "OPEN": true, + "MOVE": true, + "MOVE_TO": true, + "OPEN_SUBSPACE": true, } func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool, idx int, t fdb.Transactor, rt fdb.ReadTransactor) { diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index 81a1c9e18b..615e851f86 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -24,8 +24,8 @@ import ( "bytes" "encoding/binary" "encoding/hex" - "fdb" - "fdb/tuple" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" "log" "fmt" "os" diff --git a/bindings/go/src/fdb/directory/allocator.go b/bindings/go/src/fdb/directory/allocator.go index 69400fff8d..7303a45fee 100644 --- a/bindings/go/src/fdb/directory/allocator.go +++ b/bindings/go/src/fdb/directory/allocator.go @@ -23,8 +23,8 
@@ package directory import ( - "fdb" - "fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" "encoding/binary" "bytes" "math/rand" diff --git a/bindings/go/src/fdb/directory/directory.go b/bindings/go/src/fdb/directory/directory.go index 72e82230a1..7125dc45a5 100644 --- a/bindings/go/src/fdb/directory/directory.go +++ b/bindings/go/src/fdb/directory/directory.go @@ -40,8 +40,8 @@ package directory import ( - "fdb" - "fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" "errors" ) @@ -140,15 +140,15 @@ type Directory interface { } func stringsEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true } func moveTo(t fdb.Transactor, dl directoryLayer, path, newAbsolutePath []string) (DirectorySubspace, error) { diff --git a/bindings/go/src/fdb/directory/directoryLayer.go b/bindings/go/src/fdb/directory/directoryLayer.go index a5bfd4f102..f27caac418 100644 --- a/bindings/go/src/fdb/directory/directoryLayer.go +++ b/bindings/go/src/fdb/directory/directoryLayer.go @@ -23,9 +23,9 @@ package directory import ( - "fdb" - "fdb/subspace" - "fdb/tuple" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" "encoding/binary" "bytes" "fmt" diff --git a/bindings/go/src/fdb/directory/directoryPartition.go b/bindings/go/src/fdb/directory/directoryPartition.go index 253d9482db..cdacc1746e 100644 --- a/bindings/go/src/fdb/directory/directoryPartition.go +++ b/bindings/go/src/fdb/directory/directoryPartition.go @@ -23,9 +23,9 @@ package directory import ( - "fdb" - "fdb/subspace" - "fdb/tuple" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" ) type directoryPartition struct { diff --git a/bindings/go/src/fdb/directory/directorySubspace.go b/bindings/go/src/fdb/directory/directorySubspace.go index 70641f4422..36437ea994 100644 --- a/bindings/go/src/fdb/directory/directorySubspace.go +++ b/bindings/go/src/fdb/directory/directorySubspace.go @@ -23,8 +23,8 @@ package directory import ( - "fdb" - "fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" ) // DirectorySubspace represents a Directory that may also be used as a Subspace diff --git a/bindings/go/src/fdb/directory/node.go b/bindings/go/src/fdb/directory/node.go index a608abafcc..881a965880 100644 --- a/bindings/go/src/fdb/directory/node.go +++ b/bindings/go/src/fdb/directory/node.go @@ -23,8 +23,8 @@ package directory import ( - "fdb" - "fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" "bytes" ) diff --git a/bindings/go/src/fdb/doc.go b/bindings/go/src/fdb/doc.go index aee3e85cfe..fc4cdc563b 100644 --- a/bindings/go/src/fdb/doc.go +++ b/bindings/go/src/fdb/doc.go @@ -39,7 +39,7 @@ A basic interaction with the FoundationDB API is demonstrated below: package main import ( - "github.com/apple/foundationdb/bindings/go/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb" "log" "fmt" ) diff 
--git a/bindings/go/src/fdb/errors.go b/bindings/go/src/fdb/errors.go index 3e31256597..b8df4793d7 100644 --- a/bindings/go/src/fdb/errors.go +++ b/bindings/go/src/fdb/errors.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 200 + #define FDB_API_VERSION 500 #include */ import "C" diff --git a/bindings/go/src/fdb/fdb_test.go b/bindings/go/src/fdb/fdb_test.go index 4f07a5c6f9..709310a499 100644 --- a/bindings/go/src/fdb/fdb_test.go +++ b/bindings/go/src/fdb/fdb_test.go @@ -23,7 +23,7 @@ package fdb_test import ( - "fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb" "fmt" "testing" ) diff --git a/bindings/go/src/fdb/subspace/subspace.go b/bindings/go/src/fdb/subspace/subspace.go index 3e48842a1f..a123ea1d31 100644 --- a/bindings/go/src/fdb/subspace/subspace.go +++ b/bindings/go/src/fdb/subspace/subspace.go @@ -33,8 +33,8 @@ package subspace import ( - "fdb" - "fdb/tuple" + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" "bytes" "errors" ) diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index 1f1c5959da..444f2a8cff 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ b/bindings/go/src/fdb/tuple/tuple.go @@ -38,7 +38,7 @@ import ( "fmt" "encoding/binary" "bytes" - "fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb" ) // A TupleElement is one of the types that may be encoded in FoundationDB diff --git a/bindings/java/local.mk b/bindings/java/local.mk index 83636de36d..30cac01290 100644 --- a/bindings/java/local.mk +++ b/bindings/java/local.mk @@ -144,6 +144,11 @@ define add_java_binding_targets @rm -r packages/jar$(1)_regular @cd bindings && jar uf $$(TOPDIR)/$$@ ../LICENSE + packages/fdb-java$(1)-$$(JARVER)-tests.jar: fdb_java$(1) versions.target + @echo "Building $$@" + @rm -f $$@ + @cp $$(TOPDIR)/bindings/java/foundationdb-tests$(1).jar packages/fdb-java$(1)-$$(JARVER)-tests.jar + packages/fdb-java$(1)-$$(JARVER)-sources.jar: $$(JAVA$(1)_GENERATED_SOURCES) versions.target @echo "Building $$@" @rm -f $$@ @@ -165,7 +170,7 @@ define add_java_binding_targets @cd packages/bundle$(1)_regular && jar cf $(TOPDIR)/$$@ * @rm -rf packages/bundle$(1)_regular - fdb_java$(1)_release: packages/fdb-java$(1)-$$(JARVER)-bundle.jar + fdb_java$(1)_release: packages/fdb-java$(1)-$$(JARVER)-bundle.jar packages/fdb-java$(1)-$$(JARVER)-tests.jar fdb_java$(1)_release_clean: @echo "Cleaning Java release" diff --git a/bindings/java/src-completable/main/com/apple/cie/foundationdb/Database.java b/bindings/java/src-completable/main/com/apple/cie/foundationdb/Database.java index 1026d04148..6e8a12be4b 100644 --- a/bindings/java/src-completable/main/com/apple/cie/foundationdb/Database.java +++ b/bindings/java/src-completable/main/com/apple/cie/foundationdb/Database.java @@ -44,8 +44,8 @@ public interface Database extends Disposable, TransactionContext { * Creates a {@link Transaction} that operates on this {@code Database}.
*
* Note: Java transactions automatically set the {@link TransactionOptions#setUsedDuringCommitProtectionDisable} - * option. This is because the Java bindings disallow use of {@code Transaction} objects after either - * {@link Transaction#reset} or {@link Transaction#onError} is called. + * option. This is because the Java bindings disallow use of {@code Transaction} objects after + * {@link Transaction#onError} is called. * * @return a newly created {@code Transaction} that reads from and writes to this {@code Database}. */ diff --git a/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/AsyncIterator.java b/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/AsyncIterator.java index 5c8faf0062..c1067805c4 100644 --- a/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/AsyncIterator.java +++ b/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/AsyncIterator.java @@ -61,7 +61,7 @@ public interface AsyncIterator extends Iterator, Disposable { /** * Returns the next element in the sequence. This will not block if, since the * last call to {@code next()}, {@link #onHasNext()} was called and the resulting - *

FIXME!!!!

has completed or the blocking call {@link #hasNext()} was called + * {@link CompletableFuture} has completed or the blocking call {@link #hasNext()} was called * and has returned. It is legal, therefore, to make a call to {@code next()} without a * preceding call to * {@link #hasNext()} or {@link #onHasNext()}, but that invocation of {@code next()} diff --git a/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/package-info.java b/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/package-info.java index 38619798b4..8c116b32c1 100644 --- a/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/package-info.java +++ b/bindings/java/src-completable/main/com/apple/cie/foundationdb/async/package-info.java @@ -19,7 +19,7 @@ */ /** - * Provides additional constructs for asynchronous programming against Java's CompletableFutures. + * Provides additional constructs for asynchronous programming against Java's {@link java.util.concurrent.CompletableFuture CompletableFuture}s. * */ package com.apple.cie.foundationdb.async; diff --git a/bindings/java/src-completable/main/overview.html.in b/bindings/java/src-completable/main/overview.html.in index 9ec77aef8a..131035a151 100644 --- a/bindings/java/src-completable/main/overview.html.in +++ b/bindings/java/src-completable/main/overview.html.in @@ -24,10 +24,11 @@ and add it to your classpath.

Getting started

To start using FoundationDB from Java, create an instance of the -{@link FDB FoundationDB API interface} with the version of the +{@link com.apple.cie.foundationdb.FDB FoundationDB API interface} with the version of the API that you want to use (this release of the FoundationDB Java API supports only version {@code 500}). -With this API object you can then open {@link Cluster}s and -{@link Database}s and start using {@link Transaction}s. +With this API object you can then open {@link com.apple.cie.foundationdb.Cluster Cluster}s and +{@link com.apple.cie.foundationdb.Database Database}s and start using +{@link com.apple.cie.foundationdb.Transaction Transactions}s. Here we give an example. The example relies on a cluster file at the default location for your platform and a running server.
@@ -77,7 +78,7 @@ for information about how Tuples sort and can be used to efficiently model data. The {@link com.apple.cie.foundationdb.directory Directory API} is provided with the core Java API for FoundationDB. This layer is provided in some form in all official language bindings. The FoundationDB API provides directories as a tool for -managing related {@link Subspace}s. Directories are a +managing related {@link com.apple.cie.foundationdb.subspace.Subspace Subspace}s. Directories are a recommended approach for administering applications. Each application should create or open at least one directory to manage its subspaces. Directories are identified by hierarchical paths analogous to the paths in a Unix-like file system. diff --git a/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/AbstractTester.java b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/AbstractTester.java new file mode 100644 index 0000000000..2ef9abb340 --- /dev/null +++ b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/AbstractTester.java @@ -0,0 +1,117 @@ +/* + * AbstractTester.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.apple.cie.foundationdb.test; + +import com.apple.cie.foundationdb.Database; +import com.apple.cie.foundationdb.FDB; + +import java.nio.charset.Charset; +import java.util.Random; + +public abstract class AbstractTester { + public static final int API_VERSION = 500; + protected static final int NUM_RUNS = 25; + protected static final Charset ASCII = Charset.forName("ASCII"); + + protected TesterArgs args; + protected Random random; + protected TestResult result; + protected FDB fdb; + + public AbstractTester() { + args = null; + random = new Random(); + result = new TestResult(random); + } + + public void runTest() { + Database db; + + try { + db = fdb.open(); + } catch (Exception e) { + result.addError(wrapAndPrintError(e, "fdb.open failed")); + return; + } + + try { + testPerformance(db); + } catch (Exception e) { + result.addError(wrapAndPrintError(e, "Failed to complete all tests")); + } + } + + public abstract void testPerformance(Database db); + + public String multiVersionDescription() { + if (args == null) return ""; + + if (!args.useMultiversionApi()) { + return "multi-version API disabled"; + } else if (args.useExternalClient()) { + if (args.putCallbacksOnExternalThread()) { + return "external client on external thread"; + } else { + return "external client on main thread"; + } + } else { + return "local client"; + } + } + + public void run(String[] argStrings) { + args = TesterArgs.parseArgs(argStrings); + if (args == null) return; + + fdb = FDB.selectAPIVersion(API_VERSION); + + // Validate argument combinations and set options. 
+ if (!args.useMultiversionApi()) { + if (args.putCallbacksOnExternalThread() || args.useExternalClient()) { + throw new IllegalArgumentException("Invalid multi-version API argument combination"); + } + fdb.options().setDisableMultiVersionClientApi(); + } + if (args.putCallbacksOnExternalThread()) { + if (!args.useExternalClient()) { + throw new IllegalArgumentException("Cannot enable callbacks on external thread without using external client"); + } + fdb.options().setCallbacksOnExternalThreads(); + } + if (args.useExternalClient()) { + fdb.options().setDisableLocalClient(); + } + + try { + runTest(); + } catch (Exception e) { + result.addError(e); + } + + result.save(args.getOutputDirectory()); + } + + public RuntimeException wrapAndPrintError(Throwable t, String message) { + String errorMessage = message + ": " + t.getClass() + ": " + t.getMessage() + "\n"; + t.printStackTrace(); + return new RuntimeException(errorMessage, t); + } +} diff --git a/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/AsyncPerformanceTester.java b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/AsyncPerformanceTester.java new file mode 100644 index 0000000000..d47e516429 --- /dev/null +++ b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/AsyncPerformanceTester.java @@ -0,0 +1,28 @@ +/* + * AsyncPerformanceTester.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.apple.cie.foundationdb.test; + +public class AsyncPerformanceTester { + + public static void main(String[] args) { + System.out.println("Running Java async performance test on Java version " + System.getProperty("java.version")); + } +} diff --git a/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/PerformanceTester.java b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/PerformanceTester.java new file mode 100644 index 0000000000..562f6831c9 --- /dev/null +++ b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/PerformanceTester.java @@ -0,0 +1,408 @@ +/* + * PerformanceTester.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.apple.cie.foundationdb.test; + +import com.apple.cie.foundationdb.Database; +import com.apple.cie.foundationdb.KeySelector; +import com.apple.cie.foundationdb.Transaction; +import com.apple.cie.foundationdb.TransactionContext; +import com.apple.cie.foundationdb.async.AsyncUtil; +import com.apple.cie.foundationdb.tuple.ByteArrayUtil; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +public class PerformanceTester extends AbstractTester { + private final int keyCount; + private final int keySize; + private final int valueSize; + + private final String keyFormat; + private final byte[] valueBytes; + + public static final int DEFAULT_KEY_COUNT = 10_000; + public static final int DEFAULT_KEY_SIZE = 16; + public static final int DEFAULT_VALUE_SIZE = 100; + + private enum Tests { + FUTURE_LATENCY("Java Completable API future throughput"), + SET("Java Completable API set throughput"), + CLEAR("Java Completable API clear throughput"), + CLEAR_RANGE("Java Completable API clear_range throughput"), + PARALLEL_GET("Java Completable API parallel get throughput"), + SERIAL_GET("Java Completable API serial get throughput"), + GET_RANGE("Java Completable API get_range throughput"), + GET_KEY("Java Completable API get_key throughput"), + GET_SINGLE_KEY_RANGE("Java Completable API get_single_key_range throughput"), + ALTERNATING_GET_SET("Java Completable API alternating get and set throughput"), + WRITE_TRANSACTION("Java Completable API single-key transaction throughput"); + + private String kpi; + private Function function; + + Tests(String kpi) { + this.kpi = kpi; + } + + public void setFunction(Function function) { + this.function = function; + } + + public Function getFunction() { + return function; + } + + public String getKpi() { + return kpi; + } + } + + public PerformanceTester() { + this(DEFAULT_KEY_COUNT, DEFAULT_KEY_SIZE, DEFAULT_VALUE_SIZE); + } + + public PerformanceTester(int keyCount, int keySize, int valueSize) { + super(); + this.keyCount = keyCount; + this.keySize = keySize; + this.valueSize = valueSize; + + keyFormat = "%0" + keySize + "d"; + + valueBytes = new byte[valueSize]; + Arrays.fill(valueBytes, (byte)'x'); + + // Initialize tests. 
+ Tests.FUTURE_LATENCY.setFunction(db -> futureLatency(db, 100_000)); + Tests.SET.setFunction(db -> set(db, 100_000)); + Tests.CLEAR.setFunction(db -> clear(db, 100_000)); + Tests.CLEAR_RANGE.setFunction(db -> clearRange(db, 100_000)); + Tests.PARALLEL_GET.setFunction(db -> parallelGet(db, 10_000)); + Tests.SERIAL_GET.setFunction(db -> serialGet(db, 2_000)); + Tests.GET_RANGE.setFunction(db -> getRange(db, 1_000)); + Tests.GET_KEY.setFunction(db -> getKey(db, 2_000)); + Tests.GET_SINGLE_KEY_RANGE.setFunction(db -> getSingleKeyRange(db, 2_000)); + Tests.ALTERNATING_GET_SET.setFunction(db -> alternatingGetSet(db, 2_000)); + Tests.WRITE_TRANSACTION.setFunction(db -> writeTransaction(db, 1_000)); + } + + @Override + public void testPerformance(Database db) { + insertData(db); + + List testsToRun; + if (args.getTestsToRun().isEmpty()) { + testsToRun = Arrays.stream(Tests.values()).map(Tests::name).map(String::toLowerCase).sorted().collect(Collectors.toList()); + } else { + testsToRun = args.getTestsToRun(); + } + + for (String test : testsToRun) { + Tests testObj; + try { + testObj = Tests.valueOf(test.toUpperCase()); + } catch (IllegalArgumentException e) { + result.addError(new IllegalArgumentException("Test " + test + " not implemented")); + continue; + } + + Function function = testObj.getFunction(); + + try { + Thread.sleep(5_000); + } catch (InterruptedException e) { + result.addError(wrapAndPrintError(e, "Interrupted while sleeping")); + } + + System.out.println("Running test " + test); + + List results = new ArrayList<>(NUM_RUNS); + + for (int i = 0; i < NUM_RUNS; i++) { + try { + results.add(function.apply(db)); + } catch (Exception e) { + result.addError(wrapAndPrintError(e, "Performance test failed: " + test)); + break; + } + } + + if (results.size() == NUM_RUNS) { + Collections.sort(results); + result.addKpi(String.format("%s (%s)", testObj.getKpi(), multiVersionDescription()), results.get(results.size()/2).intValue(), "keys/s"); + } + } + } + + public void insertData(Database db) { + System.out.println("Loading database"); + + db.run(tr -> { + byte[] subspacePrefix = args.getSubspace().pack(); + if (subspacePrefix.length == 0) { + // Clear user space. + tr.clear(new byte[0], new byte[]{(byte)0xff}); + } else { + tr.clear(args.getSubspace().range()); + } + return null; + }); + + int keysPerActor = 100_000 / (keySize + valueSize); + int numActors = (int)Math.ceil(keyCount*1.0/keysPerActor); + + List> futures = IntStream.range(0, numActors).mapToObj(i -> { + int startKey = keysPerActor * i; + int endKey = (i + 1 == numActors) ? 
(keyCount) : (keysPerActor * (i+1)); + return db.runAsync(tr -> { + IntStream.range(startKey, endKey).forEach(keyIndex -> tr.set(key(keyIndex), value(keyIndex))); + return CompletableFuture.completedFuture((Void)null); + }); + }).collect(Collectors.toList()); + + try { + AsyncUtil.whenAll(futures).get(); + } catch (InterruptedException | ExecutionException e) { + result.addError(wrapAndPrintError(e, "Data insertion failed")); + } + + // Give the database time to re-balance + try { + Thread.sleep(15_000); + } catch (InterruptedException e) { + result.addError(wrapAndPrintError(e, "Interrupted while waiting for quiescence")); + } + } + + public Double futureLatency(Database db, int count) { + return db.run(tr -> { + tr.options().setRetryLimit(5); + tr.getReadVersion().join(); + + long start = System.nanoTime(); + + for (int i = 0; i < count; i++) { + tr.getReadVersion().join(); + } + + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); + } + + public Double clear(Database db, int count) { + Transaction tr = db.createTransaction(); + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.clear(randomKey()); + } + long end = System.nanoTime(); + tr.cancel(); + + return count*1_000_000_000.0/(end - start); + } + + public Double clearRange(Database db, int count) { + Transaction tr = db.createTransaction(); + + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + int keyIndex = randomKeyIndex(); + tr.clear(key(keyIndex), key(keyIndex+1)); + } + long end = System.nanoTime(); + tr.cancel(); + + return count*1_000_000_000.0/(end - start); + } + + public Double set(Database db, int count) { + Transaction tr = db.createTransaction(); + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + int keyIndex = randomKeyIndex(); + tr.set(key(keyIndex), value(keyIndex)); + } + long end = System.nanoTime(); + tr.cancel(); + + return count*1_000_000_000.0/(end - start); + } + + public Double parallelGet(TransactionContext tcx, int count) { + return tcx.run(tr -> { + tr.options().setRetryLimit(5); + long start = System.nanoTime(); + + List> futures = IntStream.range(0, count) + .mapToObj(ignore -> tr.get(randomKey())) + .collect(Collectors.toList()); + AsyncUtil.whenAll(futures).join(); + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); + } + + public Double alternatingGetSet(TransactionContext tcx, int count) { + return tcx.run(tr -> { + tr.options().setRetryLimit(5); + long start = System.nanoTime(); + + List> futures = IntStream.range(0, count) + .mapToObj(ignore -> { + int keyIndex = randomKeyIndex(); + byte[] keyBytes = key(keyIndex); + byte[] valBytes = value(keyIndex); + + tr.set(keyBytes, valBytes); + return tr.get(keyBytes); + }).collect(Collectors.toList()); + AsyncUtil.whenAll(futures).join(); + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); + } + + public Double serialGet(TransactionContext tcx, int count) { + return tcx.run(tr -> { + tr.options().setRetryLimit(5); + + List keys; + if (count > keyCount/2) { + keys = Stream.generate(this::randomKey).limit(count).collect(Collectors.toList()); + } else { + Set keySet = new HashSet<>(); + while (keySet.size() < count) { + keySet.add(randomKeyIndex()); + } + keys = keySet.stream().map(this::key).collect(Collectors.toList()); + } + + long start = System.nanoTime(); + for (byte[] key : keys) { + tr.get(key).join(); + } + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); 
+ } + + public Double getRange(TransactionContext tcx, int count) { + return tcx.run(tr -> { + tr.options().setRetryLimit(5); + int startIndex = random.nextInt(keyCount - count); + + long start = System.nanoTime(); + tr.getRange(key(startIndex), key(startIndex+count)).asList().join(); + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); + } + + public Double getKey(TransactionContext tcx, int count) { + return tcx.run(tr -> { + tr.options().setRetryLimit(5); + + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.getKey(new KeySelector(randomKey(), true, random.nextInt(20) - 10)).join(); + } + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); + } + + public Double getSingleKeyRange(TransactionContext tcx, int count) { + return tcx.run(tr -> { + tr.options().setRetryLimit(5); + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + int keyIndex = randomKeyIndex(); + tr.getRange(key(keyIndex), key(keyIndex + 1)).asList().join(); + } + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + }); + } + + public Double writeTransaction(TransactionContext tcx, int count) { + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tcx.run(tr -> { + int keyIndex = randomKeyIndex(); + tr.set(key(keyIndex), value(keyIndex)); + return null; + }); + } + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + } + + public byte[] key(int i) { + return ByteArrayUtil.join(args.getSubspace().pack(), String.format(keyFormat, i).getBytes(ASCII)); + } + + public int randomKeyIndex() { + return random.nextInt(keyCount); + } + + public byte[] randomKey() { + return key(randomKeyIndex()); + } + + public byte[] value(int key) { + return valueBytes; + } + + public static void main(String[] args) { + System.out.println("Running Java performance test on Java version " + System.getProperty("java.version")); + try { + new PerformanceTester().run(args); + } catch (IllegalArgumentException e) { + System.out.println("Could not run test due to malformed arguments."); + System.out.println(e.getMessage()); + System.exit(1); + } catch (Exception e) { + System.out.println("Fatal error encountered during run: " + e); + e.printStackTrace(); + System.exit(2); + } + } + +} \ No newline at end of file diff --git a/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/RYWBenchmark.java b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/RYWBenchmark.java new file mode 100644 index 0000000000..5b090b6328 --- /dev/null +++ b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/RYWBenchmark.java @@ -0,0 +1,233 @@ +/* + * RYWBenchmark.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.apple.cie.foundationdb.test; + +import com.apple.cie.foundationdb.Database; +import com.apple.cie.foundationdb.Transaction; +import com.apple.cie.foundationdb.tuple.ByteArrayUtil; + +import java.util.*; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class RYWBenchmark extends AbstractTester { + private int keyCount; + + public static final int DEFAULT_KEY_COUNT = 10_000; + public static final int DEFAULT_KEY_SIZE = 16; + + private final String keyFormat; + + private enum Tests { + GET_SINGLE("RYW Java Completable: get single cached value throughput"), + GET_MANY_SEQUENTIAL("RYW Java Completable: get sequential cached value throughput"), + GET_RANGE_BASIC("RYW Java Completable: get range cached values throughput"), + SINGLE_CLEAR_GET_RANGE("RYW Java Completable: get range cached values with clears throughput"), + CLEAR_RANGE_GET_RANGE("RYW Java Completable: get range cached values with clear ranges throughput"), + INTERLEAVED_SETS_GETS("RYW Java Completable: interleaved sets and gets on a single key throughput"); + + private String kpi; + private Function function; + + Tests(String kpi) { + this.kpi = kpi; + } + + public void setFunction(Function function) { + this.function = function; + } + + public Function getFunction() { + return function; + } + + public String getKpi() { + return kpi; + } + } + + public RYWBenchmark() { + this(DEFAULT_KEY_COUNT, DEFAULT_KEY_SIZE); + } + + public RYWBenchmark(int keyCount, int keySize) { + super(); + this.keyCount = keyCount; + + keyFormat = "%0" + keySize + "d"; + + Tests.GET_SINGLE.setFunction(tr -> getSingle(tr, 10_000)); + Tests.GET_MANY_SEQUENTIAL.setFunction(tr -> getManySequential(tr, 10_000)); + Tests.GET_RANGE_BASIC.setFunction(tr -> getRangeBasic(tr, 1_000)); + Tests.SINGLE_CLEAR_GET_RANGE.setFunction(tr -> singleClearGetRange(tr, 1_000)); + Tests.CLEAR_RANGE_GET_RANGE.setFunction(tr -> clearRangeGetRange(tr, 1_000)); + Tests.INTERLEAVED_SETS_GETS.setFunction(tr -> interleavedSetsGets(tr, 10_000)); + } + + @Override + public void testPerformance(Database db) { + Transaction tr = db.createTransaction(); + insertData(tr); + + List testsToRun; + if (args.getTestsToRun().isEmpty()) { + testsToRun = Arrays.stream(Tests.values()).map(Tests::name).map(String::toLowerCase).sorted().collect(Collectors.toList()); + } else { + testsToRun = args.getTestsToRun(); + } + + for (String test : testsToRun) { + Tests testObj; + try { + testObj = Tests.valueOf(test.toUpperCase()); + } catch (IllegalArgumentException e) { + result.addError(new IllegalArgumentException("Test " + test + " not implemented")); + continue; + } + + Function function = testObj.getFunction(); + + try { + Thread.sleep(5_000); + } catch (InterruptedException e) { + result.addError(wrapAndPrintError(e, "Interrupted while sleeping")); + } + + System.out.println("Running test " + test); + + List results = new ArrayList<>(NUM_RUNS); + + for (int i = 0; i < NUM_RUNS; i++) { + try { + results.add(function.apply(tr)); + } catch (Exception e) { + result.addError(wrapAndPrintError(e, "Performance test failed: " + test)); + break; + } + } + + if (results.size() == NUM_RUNS) { + Collections.sort(results); + result.addKpi(String.format("%s", testObj.getKpi()), results.get(results.size() / 2).intValue(), "keys/s"); + } + } + + tr.cancel(); + } + + public Double getSingle(Transaction tr, int count) { + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.get(key(5001)).join(); + } + long end = System.nanoTime(); + + return 
count*1_000_000_000.0/(end - start); + } + + public Double getManySequential(Transaction tr, int count) { + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.get(key(i)).join(); + } + long end = System.nanoTime(); + + return count*1_000_000_000.0/(end - start); + } + + public Double getRangeBasic(Transaction tr, int count) { + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.getRange(key(0), key(keyCount)).asList().join(); + } + long end = System.nanoTime(); + + return count * 1_000_000_000.0 * keyCount/(end - start); + } + + public Double singleClearGetRange(Transaction tr, int count) { + for (int i = 0; i < keyCount; i += 2) { + tr.clear(("" + i).getBytes(ASCII)); + } + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.getRange(key(0), key(keyCount)).asList().join(); + } + long end = System.nanoTime(); + + Double kpi = count * 1_000_000_000.0 * keyCount / 2 / (end - start); + insertData(tr); + return kpi; + } + + public Double clearRangeGetRange(Transaction tr, int count) { + for (int i = 0; i < keyCount; i += 4) { + tr.clear(key(i), key(i+1)); + } + long start = System.nanoTime(); + for (int i = 0; i < count; i++) { + tr.getRange(key(0), key(keyCount)).asList().join(); + } + long end = System.nanoTime(); + + Double kpi = count * 1_000_000_000.0 * keyCount * 3 / 4 / (end - start); + insertData(tr); + return kpi; + } + + public Double interleavedSetsGets(Transaction tr, int count) { + long start = System.nanoTime(); + byte[] keyBytes = "foo".getBytes(ASCII); + tr.set(keyBytes, "1".getBytes(ASCII)); + for (int i = 0; i < count; i++) { + int old = Integer.parseInt(new String(tr.get(keyBytes).join(), ASCII)); + tr.set(keyBytes, ("" + (old + 1)).getBytes(ASCII)); + } + long end = System.nanoTime(); + + return count * 1_000_000_000.0/(end - start); + } + + public void insertData(Transaction tr) { + tr.clear(new byte[0], new byte[]{(byte)0xff}); // Clear user space. + for (int i = 0; i < keyCount; i++) { + tr.set(key(i), "foo".getBytes(ASCII)); + } + } + + public byte[] key(int i) { + return ByteArrayUtil.join(args.getSubspace().pack(), String.format(keyFormat, i).getBytes(ASCII)); + } + + public static void main(String[] args) { + System.out.println("Running Java RYW benchmark on Java version " + System.getProperty("java.version")); + try { + new RYWBenchmark().run(args); + } catch (IllegalArgumentException e) { + System.out.println("Could not run test due to malformed arguments."); + System.exit(1); + } catch (Exception e) { + System.out.println("Fatal error encountered during run: " + e); + e.printStackTrace(); + System.exit(2); + } + } +} diff --git a/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/TestResult.java b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/TestResult.java new file mode 100644 index 0000000000..2ea112d50f --- /dev/null +++ b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/TestResult.java @@ -0,0 +1,151 @@ +/* + * TestResult.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.apple.cie.foundationdb.test; + +import java.io.BufferedWriter; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.TreeMap; + +public class TestResult { + private long id; + private Map> kpis; + private List errors; + + public TestResult(Random r) { + id = Math.abs(r.nextLong()); + kpis = new TreeMap>(); // Tree map because we will have to print this out. + errors = new ArrayList(); + } + + public void addKpi(String name, Number value, String units) { + TreeMap kpi = new TreeMap(); + kpi.put("value", value); + kpi.put("units", units); + kpis.put(name, kpi); + } + + public void addError(Throwable t) { + errors.add(t); + } + + public void save(String directory) { + String file = "javacompletableresult-" + id + ".json"; + if(directory.length() > 0) { + file = directory + "/" + file; + } + + // TODO: Should we use a really JSON library? + + StringBuilder outputBuilder = new StringBuilder(); + outputBuilder.append('{'); + + // Add KPIs: + outputBuilder.append("\"kpis\": {"); + boolean firstKpi = true; + for (Map.Entry> kpi : kpis.entrySet()) { + if (firstKpi) { + firstKpi = false; + } else { + outputBuilder.append(", "); + } + + outputBuilder.append("\""); + outputBuilder.append(kpi.getKey()); + outputBuilder.append("\": {"); + + boolean firstEntry = true; + + for (Map.Entry entry : kpi.getValue().entrySet()) { + if (firstEntry) { + firstEntry = false; + } else { + outputBuilder.append(", "); + } + + outputBuilder.append("\""); + outputBuilder.append(entry.getKey()); + outputBuilder.append("\": "); + + Object value = entry.getValue(); + if (value instanceof String) { + outputBuilder.append("\""); + outputBuilder.append((String)value); + outputBuilder.append("\""); + } else { + outputBuilder.append(value.toString()); + } + } + + outputBuilder.append("}"); + } + outputBuilder.append("}, "); + + // Add errors: + outputBuilder.append("\"errors\":["); + boolean firstError = true; + for (Throwable t : errors) { + if (firstError) { + firstError = false; + } else { + outputBuilder.append(", "); + } + + StringBuilder msgBuilder = new StringBuilder(); + msgBuilder.append(t.getClass().toString()); + msgBuilder.append(": "); + msgBuilder.append(t.getMessage()); // Escaping quotes. Yeah, this won't work in the general case.... 
+ StackTraceElement[] stackTraceElements = t.getStackTrace(); + for (StackTraceElement element : stackTraceElements) { + msgBuilder.append("\n "); + msgBuilder.append(element.toString()); + } + outputBuilder.append('"'); + outputBuilder.append(msgBuilder.toString() + .replace("\\", "\\\\") + .replace("\"", "\\\"") + .replace("\t", "\\t") + .replace("\r", "\\r") + .replace("\n", "\\n") + .replace("\f", "\\f") + .replace("\b", "\\b") + ); + outputBuilder.append('"'); + } + outputBuilder.append("]"); + + outputBuilder.append('}'); + + BufferedWriter writer = null; + try { + writer = new BufferedWriter(new FileWriter(file)); + writer.write(outputBuilder.toString()); + writer.close(); + } catch (IOException e) { + System.out.println("Could not write results to file " + file); + throw new RuntimeException("Could not save results: " + e.getMessage(), e); + } + } +} diff --git a/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/TesterArgs.java b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/TesterArgs.java new file mode 100644 index 0000000000..a2b27457b2 --- /dev/null +++ b/bindings/java/src-completable/test/com/apple/cie/foundationdb/test/TesterArgs.java @@ -0,0 +1,153 @@ +/* + * TesterArgs.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.apple.cie.foundationdb.test; + +import com.apple.cie.foundationdb.subspace.Subspace; +import com.apple.cie.foundationdb.tuple.Tuple; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class TesterArgs { + private String outputDirectory; + private boolean multiversionApi; + private boolean callbacksOnExternalThread; + private boolean externalClient; + private Subspace subspace; + private List testsToRun; + + private TesterArgs(String outputDirectory, boolean multiversionApi, boolean callbacksOnExternalThread, boolean externalClient, Subspace subspace, List testsToRun) { + this.outputDirectory = outputDirectory; + this.multiversionApi = multiversionApi; + this.callbacksOnExternalThread = callbacksOnExternalThread; + this.externalClient = externalClient; + this.subspace = subspace; + this.testsToRun = testsToRun; + } + + public static void printUsage() { + String usage = "Arguments: [-o/--output-directory DIR] [--disable-multiversion-api] [--enable-callbacks-on-external-threads] [--use-external-client] [--tests-to-run TEST [TEST ...]] [-h/--help]\n" + + "\n" + + "Arguments:\n" + + " -o/--output-directory DIR Directory to store JSON output. If not set, the current directory is used.\n" + + " --disable-multiversion-api Disables the multi-version client API\n" + + " --enable-callbacks-on-external-threads Allows callbacks to be called on threads created by the client library.\n" + + " --use-external-client Connect to the server using an external client.\n" + + " --tests-to-run TEST [TEST ...] 
List of test names to run.\n" + + " -h/--help Print this help message and then quit.\n"; + + System.out.print(usage); + } + + /** + * Parses the argument strings into a TesterArgs instance. + * This will return null if the args include an argument telling + * it to print the help message and it will throw an {@link IllegalArgumentException} + * if it can't parse the arguments. + * + * @param args command-line args + * @return built instance or null + * @throws IllegalArgumentException if the arguments can't be parsed + */ + public static TesterArgs parseArgs(String[] args) { + String outputDirectory = ""; + boolean multiversionApi = true; + boolean callbacksOnExternalThread = false; + boolean externalClient = false; + Subspace subspace = new Subspace(); + List testsToRun = new ArrayList(); + + for (int i = 0; i < args.length; i++) { + String arg = args[i]; + if (arg.equals("-o") || arg.equals("--output-directory")) { + if (i + 1 < args.length) { + outputDirectory = args[++i]; + } else { + System.out.println("No output directory specified for argument " + arg + "\n"); + printUsage(); + throw new IllegalArgumentException("No output directory specified for argument " + arg); + } + } else if (arg.equals("--subspace")) { + if (i + 1 < args.length) { + subspace = new Subspace(Tuple.from(args[++i])); + } else { + System.out.println("No subspace specified for argument " + args + "\n"); + printUsage(); + throw new IllegalArgumentException("Not subspace specified for argument " + arg); + } + } else if (arg.equals("--disable-multiversion-api")) { + multiversionApi = false; + } else if (arg.equals("--enable-callbacks-on-external-threads")) { + callbacksOnExternalThread = true; + } else if (arg.equals("--use-external-client")) { + externalClient = true; + } else if (arg.equals("--tests-to-run")) { + if (i + 1 < args.length && args[i + 1].charAt(0) != '-') { + int j; + for (j = i + 1; j < args.length && args[j].charAt(0) != '-'; j++) { + testsToRun.add(args[j]); + } + i = j; + } else { + System.out.println("No tests specified with argument " + arg + "\n"); + printUsage(); + throw new IllegalArgumentException("No tests specified with argument " + arg); + } + } else if (arg.equals("-h") || arg.equals("--help")) { + printUsage(); + return null; + } else { + System.out.println("Unknown argument " + arg + "\n"); + printUsage(); + throw new IllegalArgumentException("Unknown argument " + arg); + } + } + + return new TesterArgs(outputDirectory, multiversionApi, callbacksOnExternalThread, externalClient, subspace, testsToRun); + } + + // Accessors. + + public String getOutputDirectory() { + return outputDirectory; + } + + public boolean useMultiversionApi() { + return multiversionApi; + } + + public boolean putCallbacksOnExternalThread() { + return callbacksOnExternalThread; + } + + public boolean useExternalClient() { + return externalClient; + } + + public Subspace getSubspace() { + return subspace; + } + + public List getTestsToRun() { + return testsToRun; + } +} \ No newline at end of file diff --git a/bindings/java/src/main/com/apple/cie/foundationdb/Database.java b/bindings/java/src/main/com/apple/cie/foundationdb/Database.java index c02e66c1a0..c222f4e033 100644 --- a/bindings/java/src/main/com/apple/cie/foundationdb/Database.java +++ b/bindings/java/src/main/com/apple/cie/foundationdb/Database.java @@ -46,8 +46,8 @@ public interface Database extends Disposable, TransactionContext { * Creates a {@link Transaction} that operates on this {@code Database}.
*
* Note: Java transactions automatically set the {@link TransactionOptions#setUsedDuringCommitProtectionDisable} - * option. This is because the Java bindings disallow use of {@code Transaction} objects after either - * {@link Transaction#reset} or {@link Transaction#onError} is called. + * option. This is because the Java bindings disallow use of {@code Transaction} objects after + * {@link Transaction#onError} is called. * * @return a newly created {@code Transaction} that reads from and writes to this {@code Database}. */ diff --git a/bindings/java/src/main/overview.html.in b/bindings/java/src/main/overview.html.in index 1244f5860d..892a5aa8c1 100644 --- a/bindings/java/src/main/overview.html.in +++ b/bindings/java/src/main/overview.html.in @@ -24,11 +24,12 @@ and add it to your classpath.

Getting started

To start using FoundationDB from Java, create an instance of the -{@link FDB FoundationDB API interface} with the version of the +{@link com.apple.cie.foundationdb.FDB FoundationDB API interface} with the version of the API that you want to use (this release of the FoundationDB Java API supports only version {@code 500}). -With this API object you can then open {@link Cluster}s and -{@link Database}s and start using {@link Transaction}s. -Here we give an example. The example relies on a cluster file at the +With this API object you can then open {@link com.apple.cie.foundationdb.Cluster}s and +{@link com.apple.cie.foundationdb.Database}s and start using +{@link com.apple.cie.foundationdb.Transaction}s. Here we give an example. The example relies on a +cluster file at the default location for your platform and a running server.

@@ -77,7 +78,7 @@ for information about how Tuples sort and can be used to efficiently model data. The {@link com.apple.cie.foundationdb.directory Directory API} is provided with the core Java API for FoundationDB. This layer is provided in some form in all official language bindings. The FoundationDB API provides directories as a tool for -managing related {@link Subspace}s. Directories are a +managing related {@link com.apple.cie.foundationdb.subspace.Subspace Subspace}s. Directories are a recommended approach for administering applications. Each application should create or open at least one directory to manage its subspaces. Directories are identified by hierarchical paths analogous to the paths in a Unix-like file system. @@ -87,12 +88,12 @@ for the corresponding subspace. In effect, directories provide a level of indire for access to subspaces.

{@link com.apple.cie.foundationdb.async.Future Future}s and asynchronous operation

-Asynchronous FoundationDB operations return {@link Future}s. -A {@link Future} can be used in a blocking way using the -{@link Future#get() get()} method or in a +Asynchronous FoundationDB operations return {@link com.apple.cie.foundationdb.async.Future Future}s. +A {@link com.apple.cie.foundationdb.async.Future Future} can be used in a blocking way using the +{@link com.apple.cie.foundationdb.async.Future#get() get()} method or in a fully-asynchronous way using the -{@link Future#map(Function) map()} and -{@link Future#flatMap(Function) flatMap()} +{@link com.apple.cie.foundationdb.async.Future#map(Function) map()} and +{@link com.apple.cie.foundationdb.async.Future#flatMap(Function) flatMap()} methods. Generally, the blocking style is more straightforward and the asynchronous style is more efficient. Mixing the two styles correctly can be tricky, so consider choosing one or the other. See the {@linkplain com.apple.cie.foundationdb.async async Package documentation} diff --git a/bindings/nodejs/binding.gyp b/bindings/nodejs/binding.gyp index d24034df76..6771942577 100644 --- a/bindings/nodejs/binding.gyp +++ b/bindings/nodejs/binding.gyp @@ -2,7 +2,7 @@ 'targets': [ { 'target_name': 'fdblib', - 'sources': [ 'src/FdbV8Wrapper.cpp', 'src/Database.cpp', 'src/Transaction.cpp', 'src/Cluster.cpp', 'src/FdbError.cpp', 'src/FdbOptions.cpp', 'src/FdbOptions.g.cpp' ], + 'sources': [ 'src/FdbV8Wrapper.cpp', 'src/Database.cpp', 'src/Transaction.cpp', 'src/Cluster.cpp', 'src/FdbError.cpp', 'src/FdbOptions.cpp', 'src/FdbOptions.g.cpp', 'src/FdbUtil.cpp' ], 'include_dirs': ['../c'], 'conditions': [ ['OS=="linux"', { diff --git a/bindings/nodejs/binding.gyp.npmsrc b/bindings/nodejs/binding.gyp.npmsrc index f6b6e5e691..4e39c9a049 100644 --- a/bindings/nodejs/binding.gyp.npmsrc +++ b/bindings/nodejs/binding.gyp.npmsrc @@ -2,7 +2,7 @@ 'targets': [ { 'target_name': 'fdblib', - 'sources': [ 'src/FdbV8Wrapper.cpp', 'src/Database.cpp', 'src/Transaction.cpp', 'src/Cluster.cpp', 'src/FdbError.cpp', 'src/FdbOptions.cpp', 'src/FdbOptions.g.cpp' ], + 'sources': [ 'src/FdbV8Wrapper.cpp', 'src/Database.cpp', 'src/Transaction.cpp', 'src/Cluster.cpp', 'src/FdbError.cpp', 'src/FdbOptions.cpp', 'src/FdbOptions.g.cpp', 'src/FdbUtil.cpp' ], 'conditions': [ ['OS=="linux"', { 'link_settings': { 'libraries': ['-lfdb_c'] }, diff --git a/bindings/nodejs/fdb_node.target b/bindings/nodejs/fdb_node.target index 9712d768e6..14c7d6684d 100644 --- a/bindings/nodejs/fdb_node.target +++ b/bindings/nodejs/fdb_node.target @@ -136,12 +136,14 @@ copy "$(TargetPath)" "modules\$(NodeVersionDir)\fdblib.node" + + diff --git a/bindings/nodejs/include.mk b/bindings/nodejs/include.mk index c6e7135f4c..2f4243eb20 100644 --- a/bindings/nodejs/include.mk +++ b/bindings/nodejs/include.mk @@ -46,7 +46,7 @@ bindings/nodejs/fdb_node.stamp: bindings/nodejs/src/FdbOptions.g.cpp bindings/no for ver in $(NODE_VERSIONS); do \ MMVER=`echo $$ver | sed -e 's,\., ,g' | awk '{print $$1 "." 
$$2}'` && \ mkdir modules/$$MMVER && \ - node-gyp configure --target=$$ver && \ + node-gyp configure --dist-url=https://nodejs.org/dist --target=$$ver && \ node-gyp -v build && \ cp build/Release/fdblib.node modules/$${MMVER} ; \ done @@ -67,6 +67,7 @@ bindings/nodejs/package.json: bindings/nodejs/package.json.in $(ALL_MAKEFILES) v @m4 -DVERSION=$(NPMVER) $< > $@ @echo "Updating Node dependencies" @cd bindings/nodejs && \ + npm config set registry "https://registry.npmjs.org/" && \ npm update fdb_node_npm: fdb_node versions.target bindings/nodejs/README.md bindings/nodejs/lib/*.js bindings/nodejs/src/* bindings/nodejs/binding.gyp LICENSE diff --git a/bindings/nodejs/lib/tuple.js b/bindings/nodejs/lib/tuple.js index 1fe1d1dcc5..5758791407 100644 --- a/bindings/nodejs/lib/tuple.js +++ b/bindings/nodejs/lib/tuple.js @@ -24,6 +24,7 @@ var assert = require('assert'); var buffer = require('./bufferConversion'); var fdbUtil = require('./fdbUtil'); +var fdb = require('./fdbModule'); var FDBError = require('./error'); var sizeLimits = new Array(8); @@ -83,7 +84,7 @@ function Float(value) { return this.rawData; } else { var buf = new Buffer(4); - buf.writeFloatBE(this.value, 0); + buf.writeFloatBE(fdb.toFloat(this.value), 0); return buf; } }; @@ -230,7 +231,7 @@ function encode(item, buf, pos) { if (isNaN(item.value) && item.rawData !== undefined) { item.rawData.copy(outBuf, 1, 0, 4); } else { - outBuf.writeFloatBE(item.value, 1); + outBuf.writeFloatBE(fdb.toFloat(item.value), 1); } adjustFloat(outBuf, 1, true); return outBuf; diff --git a/bindings/nodejs/src/FdbUtil.cpp b/bindings/nodejs/src/FdbUtil.cpp new file mode 100644 index 0000000000..6928982868 --- /dev/null +++ b/bindings/nodejs/src/FdbUtil.cpp @@ -0,0 +1,42 @@ +/* + * FdbUtil.cpp + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include +#include "FdbUtil.h" + +using namespace v8; + +Handle ToFloat(const Arguments &args) { + HandleScope scope; + + if (args.Length() != 1) { + return ThrowException(Exception::TypeError(String::NewSymbol("Wrong number of arguments (must be exactly 1)"))); + } + + if (!args[0]->IsNumber()) { + return ThrowException(Exception::TypeError(String::NewSymbol("Argument is not a Number"))); + } + + float value = (float)args[0]->NumberValue(); + Handle jsValue = Number::New(value); + + return scope.Close(jsValue); +} diff --git a/bindings/nodejs/src/FdbUtil.h b/bindings/nodejs/src/FdbUtil.h new file mode 100644 index 0000000000..a8ad05a01a --- /dev/null +++ b/bindings/nodejs/src/FdbUtil.h @@ -0,0 +1,29 @@ +/* + * FdbUtil.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef FDB_NODE_FDB_UTIL_H +#define FDB_NODE_FDB_UTIL_H + +#include + +v8::Handle ToFloat(const v8::Arguments &args); + +#endif diff --git a/bindings/nodejs/src/FdbV8Wrapper.cpp b/bindings/nodejs/src/FdbV8Wrapper.cpp index 4b7eea8371..659b26a0a9 100644 --- a/bindings/nodejs/src/FdbV8Wrapper.cpp +++ b/bindings/nodejs/src/FdbV8Wrapper.cpp @@ -33,6 +33,7 @@ #include "Version.h" #include "FdbError.h" #include "FdbOptions.h" +#include "FdbUtil.h" uv_thread_t fdbThread; @@ -140,6 +141,7 @@ void init(Handle target){ target->Set(String::NewSymbol("options"), FdbOptions::CreateOptions(FdbOptions::NetworkOption)); target->Set(String::NewSymbol("streamingMode"), FdbOptions::CreateEnum(FdbOptions::StreamingMode)); target->Set(String::NewSymbol("atomic"), FdbOptions::CreateOptions(FdbOptions::MutationType)); + target->Set(String::NewSymbol("toFloat"), FunctionTemplate::New(ToFloat)->GetFunction()); } #if NODE_VERSION_AT_LEAST(0, 8, 0) diff --git a/bindings/nodejs/tests/tuple_test.js b/bindings/nodejs/tests/tuple_test.js index e430c964c4..56efde8d78 100755 --- a/bindings/nodejs/tests/tuple_test.js +++ b/bindings/nodejs/tests/tuple_test.js @@ -18,7 +18,8 @@ * limitations under the License. */ -var fdb = require('../lib/fdb.js').apiVersion(200); +var fdb = require('../lib/fdb.js').apiVersion(500); +var fdbModule = require('../lib/fdbModule.js'); console.log(fdb.tuple.pack([-Math.pow(2,53)])); console.log(fdb.tuple.pack([-Math.pow(2,53)+1])); @@ -76,3 +77,25 @@ tuples = [ ]; tuples.sort(fdb.tuple.compare); console.log(tuples); + +// Float overruns. +const floats = [ 2.037036e90, -2.037036e90, 4.9090935e-91, -4.9090935e-91, 2.345624805922133125e14, -2.345624805922133125e14 ]; +for (var i = 0; i < floats.length; i++) { + var f = floats[i]; + console.log(f + " -> " + fdb.tuple.Float.fromBytes((new fdb.tuple.Float(f)).toBytes()).value); +} + +// Float type errors. +try { + console.log((new fdb.tuple.Float("asdf")).toBytes()); +} catch (e) { + console.log("Caught!"); + console.log(e); +} + +try { + console.log(fdbModule.toFloat(3.14, 2.718)); +} catch (e) { + console.log("Caught!"); + console.log(e); +} diff --git a/design/tuple.md b/design/tuple.md new file mode 100644 index 0000000000..aed8858f32 --- /dev/null +++ b/design/tuple.md @@ -0,0 +1,196 @@ +# FDB Tuple layer typecodes + +This document is intended to be the system of record for the allocation of typecodes in the Tuple layer. The source code isn’t good enough because a typecode might be added to one language (or by a customer) before another. + +Status: Standard means that all of our language bindings implement this typecode +Status: Reserved means that this typecode is not yet used in our standard language bindings, but may be in use by third party bindings or specific applications +Status: Deprecated means that a previous layer used this type, but issues with that type code have led us to mark this type code as not to be used. 
+ +### **Null Value** + +Typecode: 0x00 +Length: 0 bytes +Status: Standard + +### **Byte String** + +Typecode: 0x01 +Length: Variable (terminated by `[\x00]![\xff]`) +Encoding: `b'\x01' + value.replace(b'\x00', b'\x00\xFF') + b'\x00'` +Test case: `pack("foo\x00bar") == b'\x01foo\x00\xffbar\x00'` +Status: Standard + +In other words, byte strings are null terminated with null values occurring in the string escaped in an order-preserving way. + +### **Unicode String** + +Typecode: 0x02 +Length: Variable (terminated by `[\x00]![\xff]`) +Encoding: `b'\x02' + value.encode('utf-8').replace(b'\x00', b'\x00\xFF') + b'\x00'` +Test case: `pack( u"F\u00d4O\u0000bar" ) == b'\x02F\xc3\x94O\x00\xffbar\x00'` +Status: Standard + +This is the same way that byte strings are encoded, but first, the unicode string is encoded in UTF-8. + +### **(DEPRECATED) Nested Tuple** + +Typecodes: 0x03-0x04 +Length: Variable (terminated by 0x04 type code) +Status: Deprecated + +This encoding was used by a few layers. However, it had ordering problems when one tuple was a prefix of another and the type of the first element in the longer tuple was either null or a byte string. For an example, consider the empty tuple and the tuple containing only null. In the old scheme, the empty tuple would be encoded as `\x03\x04` while the tuple containing only null would be encoded as `\x03\x00\x04`, so the second tuple would sort first based on their bytes, which is incorrect semantically. + +### **Nested Tuple** + +Typecodes: 0x05 +Length: Variable (terminated by `[\x00]![\xff]` at beginning of nested element) +Encoding: `b'\x05' + ''.join(map(lambda x: b'\x00\xff' if x is None else pack(x), value)) + b'\x00'` +Test case: `pack( ("foo\x00bar", None, ()) ) == b'\x05\x01foo\x00\xffbar\x00\x00\xff\x05\x00\x00'` +Status: Standard + +The list is ended with a 0x00 byte. Nulls within the tuple are encoded as `\x00\xff`. There is no other null escaping. In particular, 0x00 bytes that are within the nested types can be left as-is as they are passed over when decoding the interior types. To show how this fixes the bug in the previous version of nested tuples, the empty tuple is now encoded as `\x05\x00` while the tuple containing only null is encoded as `\x05\x00\xff\x00`, so the first tuple will sort first. + +### **Negative arbitrary-precision Integer** + +Typecodes: 0x0a, 0x0b +Encoding: Not defined yet +Status: Reserved; 0x0b used in Python and Java + +These typecodes are reserved for encoding integers larger than 8 bytes. Presumably the type code would be followed by some encoding of the length, followed by the big endian one’s complement number. Reserving two typecodes for each of positive and negative numbers is probably overkill, but until there’s a design in place we might as well not use them. In the Python and Java implementations, 0x0b stores negative numbers which are expressed with between 9 and 255 bytes. The first byte following the type code (0x0b) is a single byte expressing the number of bytes in the integer (with its bits flipped to preserve order), followed by that number of bytes representing the number in big endian order in one's complement.
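To make the byte-string escaping above concrete, here is a minimal Python sketch of the typecode 0x01 encoding, checked against the Byte String test case; `pack_bytes` is a hypothetical helper used only for illustration and is not an API of any FoundationDB binding.

```python
# Minimal sketch (not a binding API): byte-string encoding, typecode 0x01.
def pack_bytes(value: bytes) -> bytes:
    # Escape embedded nulls as \x00\xff (order-preserving), then null-terminate.
    return b'\x01' + value.replace(b'\x00', b'\x00\xff') + b'\x00'

assert pack_bytes(b'foo\x00bar') == b'\x01foo\x00\xffbar\x00'
```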
+

### **Integer**

Typecodes: 0x0c - 0x1c
  0x0c is an 8 byte negative number
  0x13 is a 1 byte negative number
  0x14 is zero
  0x15 is a 1 byte positive number
  0x1c is an 8 byte positive number
Length: Depends on typecode (0-8 bytes)
Encoding: positive numbers are big endian
  negative numbers are big endian one's complement (so -1 is 0x13 0xfe)
Test case: `pack( -5551212 ) == b'\x11\xabK\x93'`
Status: Standard

There is some variation in the ability of language bindings to encode and decode values at the extremes of the possible range, because of different native representations of integers. (A small worked sketch of this encoding appears below, after the UUID type.)

### **Positive arbitrary-precision Integer**

Typecodes: 0x1d, 0x1e
Encoding: Not defined yet
Status: Reserved; 0x1d used in Python and Java

These typecodes are reserved for encoding integers larger than 8 bytes. Presumably the type code would be followed by some encoding of the length, followed by the big endian one's complement number. Reserving two typecodes for each of positive and negative numbers is probably overkill, but until there's a design in place we might as well not use them. In the Python and Java implementations, 0x1d stores positive numbers which are expressed with between 9 and 255 bytes. The first byte following the type code (0x1d) is a single byte expressing the number of bytes in the integer, followed by that number of bytes representing the number in big endian order.

### **IEEE Binary Floating Point**

Typecodes:
  0x20 - float (32 bits)
  0x21 - double (64 bits)
  0x22 - long double (80 bits)
Length: 4 - 10 bytes
Test case: `pack( -42f ) == b'=\xd7\xff\xff'`
Encoding: Big-endian IEEE binary representation, followed by the following transformation:
```python
    if ord(rep[0])&0x80: # Check sign bit
        # Flip all bits, this is easier in most other languages!
        return "".join( chr(0xff^ord(r)) for r in rep )
    else:
        # Flip just the sign bit
        return chr(0x80^ord(rep[0])) + rep[1:]
```
Status: Standard (float and double); Reserved (long double)

The binary representation should not be assumed by a reader to be canonicalized (with respect to the multiple representations of NaN, for example). This order sorts all numbers in the following way:

* All negative NaN values with order determined by mantissa bits (which are semantically meaningless)
* Negative infinity
* All real numbers in the standard order (except that -0.0 < 0.0)
* Positive infinity
* All positive NaN values with order determined by mantissa bits

This should be equivalent to the standard IEEE total ordering.

### **Arbitrary-precision Decimal**

Typecodes: 0x23, 0x24
Length: Arbitrary
Encoding: Scale followed by arbitrary precision integer
Status: Reserved

This encoding format has been used by some layers. Note that this encoding makes almost no guarantees about ordering properties of tuple-encoded values and should thus generally be avoided.

### **(DEPRECATED) True Value**

Typecode: 0x25
Length: 0 bytes
Status: Deprecated

### **False Value**

Typecode: 0x26
Length: 0 bytes
Status: Standard

### **True Value**

Typecode: 0x27
Length: 0 bytes
Status: Standard

Note that false will sort before true with the given encoding.

### **RFC 4122 UUID**

Typecode: 0x30
Length: 16 bytes
Encoding: Network byte order as defined in the RFC: [_http://www.ietf.org/rfc/rfc4122.txt_](http://www.ietf.org/rfc/rfc4122.txt)
Status: Standard

This is equivalent to the unsigned byte ordering of the UUID bytes in big-endian order.
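The following is the small worked sketch of the Integer encoding (typecodes 0x0c - 0x1c) promised above. It is a simplified illustration rather than any binding's implementation; `encode_int` is a name made up for this example.

```python
def encode_int(n):
    # Illustrative only: the 0x0c - 0x1c encoding for integers that fit in 0-8 bytes.
    if n == 0:
        return b'\x14'
    length = (abs(n).bit_length() + 7) // 8   # bytes needed for |n|
    assert length <= 8, "larger values belong to the arbitrary-precision typecodes"
    if n > 0:
        return bytes([0x14 + length]) + n.to_bytes(length, 'big')
    # Negative: big-endian one's complement, with the typecode counting down from 0x14.
    return bytes([0x14 - length]) + ((1 << (8 * length)) - 1 + n).to_bytes(length, 'big')

assert encode_int(0) == b'\x14'
assert encode_int(-1) == b'\x13\xfe'                # matches the -1 example above
assert encode_int(-5551212) == b'\x11\xabK\x93'     # matches the test case above
```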
+

### **64 bit identifier**

Typecode: 0x31
Length: 8 bytes
Encoding: Big endian unsigned 8-byte integer (typically random or perhaps semi-sequential)
Status: Reserved

There's definitely some question of whether this deserves to be separated from a plain old 64 bit integer, but a separate type was desired in one of the third-party bindings. This type has not been ported over to the first-party bindings.

### **80 Bit Versionstamp**

Typecode: 0x32
Length: 10 bytes
Encoding: Big endian 10-byte integer. First/high 8 bytes are a database version, next two are the batch version.
Status: Reserved

### **96 Bit Versionstamp**

Typecode: 0x33
Length: 12 bytes
Encoding: Big endian 12-byte integer. First/high 8 bytes are a database version, next two are the batch version, next two are the ordering within a transaction.
Status: Reserved

The two versionstamp typecodes are reserved for future work adding compatibility between the tuple layer and versionstamp operations. Note that the first 80 bits of the 96 bit versionstamp are the same as the contents of the 80 bit versionstamp, and they correspond to what the `SET_VERSIONSTAMP_KEY` mutation will write into a database key, i.e., the first 8 bytes are a big-endian, unsigned version corresponding to the commit version of a transaction, and the next two bytes are a big-endian, unsigned batch number ordering transactions committed at the same version. The final two bytes of the 96 bit versionstamp are written by the client and should order writes within a single transaction, thereby providing a global order for all versions. (A byte-layout sketch of these versionstamps appears at the end of this document.)

### **User type codes**

Typecode: 0x40 - 0x4f
Length: Variable (user defined)
Encoding: User defined
Status: Reserved

These type codes may be used by third party extenders without coordinating with us. If used in shipping software, the software should use the directory layer and specify a specific layer name when opening its directories to eliminate the possibility of conflicts.

The only way in which future official, otherwise backward-compatible versions of the tuple layer would be expected to use these type codes is to implement some kind of actual extensibility point for this purpose; they will not be used for standard types.

### **Escape Character**

Typecode: 0xff
Length: N/A
Encoding: N/A
Status: Reserved

This type code is not used for anything. However, several of the other tuple types depend on this type code not being used as a type code for other types in order to correctly escape bytes in an order-preserving way. Therefore, it would be a Very Bad Idea™ for future development to start using this code for anything else.
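Finally, here is the byte-layout sketch of the versionstamp typecodes referenced above. The helper name and field names are assumptions made for this illustration; they are not part of any binding's API, and the payload shown excludes the 0x32/0x33 typecode prefix.

```python
import struct

def pack_96bit_versionstamp(commit_version, batch_order, tx_order):
    # Illustrative layout only: 8-byte commit version, 2-byte batch order,
    # 2-byte in-transaction order, all big endian so byte order matches numeric order.
    return struct.pack('>QHH', commit_version, batch_order, tx_order)

stamp = pack_96bit_versionstamp(0x0123456789ABCDEF, 7, 1)
assert len(stamp) == 12
# The first 10 bytes are exactly the corresponding 80 bit versionstamp payload.
assert stamp[:10] == struct.pack('>QH', 0x0123456789ABCDEF, 7)
```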
diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index 22c78f5227..cfbedc5e5f 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -23,6 +23,7 @@ #include "flow/serialize.h" #include "flow/IRandom.h" #include "flow/genericactors.actor.h" +#include "flow/SignalSafeUnwind.h" #include "fdbclient/FDBTypes.h" #include "fdbclient/BackupAgent.h" @@ -1820,6 +1821,7 @@ extern uint8_t *g_extra_memory; int main(int argc, char* argv[]) { platformInit(); + initSignalSafeUnwind(); int status = FDB_EXIT_SUCCESS; diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index 3db220f7bd..a0f35f5106 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -30,6 +30,7 @@ #include "fdbclient/FDBOptions.g.h" #include "flow/DeterministicRandom.h" +#include "flow/SignalSafeUnwind.h" #include "fdbrpc/TLSConnection.h" #include "fdbrpc/Platform.h" @@ -436,9 +437,9 @@ void initHelp() { "clear a range of keys from the database", "All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK); helpMap["configure"] = CommandHelp( - "configure [new] |logs=|resolvers=>*", + "configure [new] |logs=|resolvers=>*", "change database configuration", - "The `new' option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When used, both a redundancy mode and a storage engine must be specified.\n\nRedundancy mode:\n single - one copy of the data. Not fault tolerant.\n double - two copies of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - See the Admin Guide.\n three_datacenter - See the Admin Guide.\n fast_recovery_double - two copies of data on the storage servers, three copies of the data on the logs, non-copying recovery if one log is missing.\n fast_recovery_triple - three copies of data on the storage servers, four copies of the data on the logs, non-copying recovery if one log is missing.\n\nStorage engine:\n ssd - B-Tree storage engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small datasets.\n\nproxies=: Sets the desired number of proxies in the cluster. Must be at least 1, or set to -1 which restores the number of proxies to the default value.\n\nlogs=: Sets the desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=: Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information."); + "The `new' option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When used, both a redundancy mode and a storage engine must be specified.\n\nRedundancy mode:\n single - one copy of the data. Not fault tolerant.\n double - two copies of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - See the Admin Guide.\n three_datacenter - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small datasets.\n\nproxies=: Sets the desired number of proxies in the cluster. 
Must be at least 1, or set to -1 which restores the number of proxies to the default value.\n\nlogs=: Sets the desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=: Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information."); helpMap["coordinators"] = CommandHelp( "coordinators auto|
+ [description=new_cluster_description]", "change cluster coordinators or description", @@ -504,6 +505,7 @@ void initHelp() { "If no addresses are specified, populates the list of processes which can be killed. Processes cannot be killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is unresponsive.\n\nFor each IP:port pair in
*, attempt to kill the specified process."); hiddenCommands.insert("expensive_data_check"); + hiddenCommands.insert("datadistribution"); } void printVersion() { @@ -1672,7 +1674,18 @@ ACTOR Future exclude( Database db, std::vector tokens, Referenc state double worstFreeSpaceRatio = 1.0; try { for (auto proc : processesMap.obj()){ + bool storageServer = false; StatusArray rolesArray = proc.second.get_obj()["roles"].get_array(); + for (StatusObjectReader role : rolesArray) { + if (role["role"].get_str() == "storage") { + storageServer = true; + break; + } + } + // Skip non-storage servers in free space calculation + if (!storageServer) + continue; + StatusObjectReader process(proc.second); std::string addrStr; if (!process.get("address", addrStr)) { @@ -1681,6 +1694,9 @@ ACTOR Future exclude( Database db, std::vector tokens, Referenc } NetworkAddress addr = NetworkAddress::parse(addrStr); bool excluded = (process.has("excluded") && process.last().get_bool()) || addressExcluded(exclusions, addr); + ssTotalCount++; + if (excluded) + ssExcludedCount++; if(!excluded) { StatusObjectReader disk; @@ -1703,15 +1719,6 @@ ACTOR Future exclude( Database db, std::vector tokens, Referenc worstFreeSpaceRatio = std::min(worstFreeSpaceRatio, double(free_bytes)/total_bytes); } - - for (StatusObjectReader role : rolesArray) { - if (role["role"].get_str() == "storage") { - if (excluded) - ssExcludedCount++; - ssTotalCount++; - break; - } - } } } catch (...) // std::exception @@ -1895,7 +1902,7 @@ void onoff_generator(const char* text, const char *line, std::vector& lc) { - const char* opts[] = {"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "fast_recovery_double", "fast_recovery_triple", "ssd", "ssd-1", "ssd-2", "memory", "proxies=", "logs=", "resolvers=", NULL}; + const char* opts[] = {"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "ssd", "ssd-1", "ssd-2", "memory", "proxies=", "logs=", "resolvers=", NULL}; array_generator(text, line, opts, lc); } @@ -2210,35 +2217,45 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { state UID randomID = g_random->randomUniqueID(); TraceEvent(SevInfo, "CLICommandLog", randomID).detail("command", printable(StringRef(line))); - bool err, partial; - state std::vector> parsed = parseLine(line, err, partial); - if (err) { - LogCommand(line, randomID, "ERROR: malformed escape sequence"); - is_error = true; - continue; - } - if (partial) { - LogCommand(line, randomID, "ERROR: unterminated quote"); - is_error = true; - continue; + bool malformed, partial; + state std::vector> parsed = parseLine(line, malformed, partial); + if (malformed) LogCommand(line, randomID, "ERROR: malformed escape sequence"); + if (partial) LogCommand(line, randomID, "ERROR: unterminated quote"); + if (malformed || partial) { + if (parsed.size() > 0) { + // Denote via a special token that the command was a parse failure. 
+ auto& last_command = parsed.back(); + last_command.insert(last_command.begin(), StringRef((const uint8_t*)"parse_error", strlen("parse_error"))); + } } state bool multi = parsed.size() > 1; + is_error = false; state std::vector>::iterator iter; for (iter = parsed.begin(); iter != parsed.end(); ++iter) { state std::vector tokens = *iter; - if (opt.exec.present() && is_error) { + if (is_error) { printf("WARNING: the previous command failed, the remaining commands will not be executed.\n"); - return 1; + break; } - is_error = false; - if (!tokens.size()) continue; + if (tokencmp(tokens[0], "parse_error")) { + printf("ERROR: Command failed to completely parse.\n"); + if (tokens.size() > 1) { + printf("ERROR: Not running partial or malformed command:"); + for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) + printf(" %s", formatStringRef(*t, true).c_str()); + printf("\n"); + } + is_error = true; + continue; + } + if (multi) { printf(">>>"); for (auto t = tokens.begin(); t != tokens.end(); ++t) @@ -2717,6 +2734,25 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { continue; } + if (tokencmp(tokens[0], "datadistribution")) { + if (tokens.size() != 2) { + printf("Usage: datadistribution \n"); + is_error = true; + } else { + if(tokencmp(tokens[1], "on")) { + int _ = wait(setDDMode(db, 1)); + printf("Data distribution is enabled\n"); + } else if(tokencmp(tokens[1], "off")) { + int _ = wait(setDDMode(db, 0)); + printf("Data distribution is disabled\n"); + } else { + printf("Usage: datadistribution \n"); + is_error = true; + } + } + continue; + } + if (tokencmp(tokens[0], "option")) { if (tokens.size() == 2 || tokens.size() > 4) { printUsage(tokens[0]); @@ -2841,6 +2877,7 @@ ACTOR Future timeExit(double duration) { int main(int argc, char **argv) { platformInit(); + initSignalSafeUnwind(); Error::init(); registerCrashHandler(); diff --git a/fdbclient/BackupAgent.h b/fdbclient/BackupAgent.h index b5689d5fd3..7cf8c33bb0 100644 --- a/fdbclient/BackupAgent.h +++ b/fdbclient/BackupAgent.h @@ -25,7 +25,7 @@ #include "flow/flow.h" #include "NativeAPI.h" #include "TaskBucket.h" -#include "flow/Notified.h" +#include "Notified.h" #include #include "KeyBackedTypes.h" #include diff --git a/fdbclient/FailureMonitorClient.actor.cpp b/fdbclient/FailureMonitorClient.actor.cpp index b263fb883a..46b0dd740f 100644 --- a/fdbclient/FailureMonitorClient.actor.cpp +++ b/fdbclient/FailureMonitorClient.actor.cpp @@ -22,12 +22,20 @@ #include "fdbrpc/FailureMonitor.h" #include "ClusterInterface.h" +struct FailureMonitorClientState : ReferenceCounted { + std::set knownAddrs; + double serverFailedTimeout; + + FailureMonitorClientState() { + serverFailedTimeout = CLIENT_KNOBS->FAILURE_TIMEOUT_DELAY; + } +}; + ACTOR Future failureMonitorClientLoop( SimpleFailureMonitor* monitor, ClusterInterface controller, - double* pServerFailedTimeout, - bool trackMyStatus, - std::set* knownAddrs) + Reference fmState, + bool trackMyStatus) { state Version version = 0; state Future request = Never(); @@ -37,7 +45,7 @@ ACTOR Future failureMonitorClientLoop( state double waitfor = 0; monitor->setStatus(controller.failureMonitoring.getEndpoint().address, FailureStatus(false)); - knownAddrs->insert( controller.failureMonitoring.getEndpoint().address ); + fmState->knownAddrs.insert( controller.failureMonitoring.getEndpoint().address ); //The cluster controller's address (controller.failureMonitoring.getEndpoint().address) is treated specially because we can declare that it is down independently //of the response from the cluster 
controller. It still needs to be in knownAddrs in case the cluster controller changes, so the next cluster controller resets its state @@ -51,14 +59,14 @@ ACTOR Future failureMonitorClientLoop( requestTimeout = Never(); if (reply.allOthersFailed) { // Reset all systems *not* mentioned in the reply to the default (failed) state - knownAddrs->erase( controller.failureMonitoring.getEndpoint().address ); + fmState->knownAddrs.erase( controller.failureMonitoring.getEndpoint().address ); std::set changedAddresses; for(int c=0; cknownAddrs) if (!changedAddresses.count( it )) monitor->setStatus( it, FailureStatus() ); - knownAddrs->clear(); + fmState->knownAddrs.clear(); } else { ASSERT( version != 0 ); } @@ -66,20 +74,20 @@ ACTOR Future failureMonitorClientLoop( if( monitor->getState( controller.failureMonitoring.getEndpoint() ).isFailed() ) TraceEvent("FailureMonitoringServerUp").detail("OldServer",controller.id()); monitor->setStatus( controller.failureMonitoring.getEndpoint().address, FailureStatus(false) ); - knownAddrs->insert( controller.failureMonitoring.getEndpoint().address ); + fmState->knownAddrs.insert( controller.failureMonitoring.getEndpoint().address ); //if (version != reply.failureInformationVersion) // printf("Client '%s': update from %lld to %lld (%d changes, aof=%d)\n", g_network->getLocalAddress().toString().c_str(), version, reply.failureInformationVersion, reply.changes.size(), reply.allOthersFailed); version = reply.failureInformationVersion; - *pServerFailedTimeout = reply.considerServerFailedTimeoutMS * .001; + fmState->serverFailedTimeout = reply.considerServerFailedTimeoutMS * .001; for(int c=0; cgetLocalAddress().toString().c_str(), reply.changes[c].address.toString().c_str(), reply.changes[c].status.failed ? "Failed" : "OK"); monitor->setStatus( reply.changes[c].address, reply.changes[c].status ); if (reply.changes[c].status != FailureStatus()) - knownAddrs->insert( reply.changes[c].address ); + fmState->knownAddrs.insert( reply.changes[c].address ); else - knownAddrs->erase( reply.changes[c].address ); + fmState->knownAddrs.erase( reply.changes[c].address ); ASSERT( reply.changes[c].address != controller.failureMonitoring.getEndpoint().address || !reply.changes[c].status.failed ); } before = now(); @@ -91,7 +99,7 @@ ACTOR Future failureMonitorClientLoop( requestTimeout = Never(); TraceEvent(SevWarn, "FailureMonitoringServerDown").detail("OldServerID",controller.id()); monitor->setStatus( controller.failureMonitoring.getEndpoint().address, FailureStatus(true) ); - knownAddrs->erase( controller.failureMonitoring.getEndpoint().address ); + fmState->knownAddrs.erase( controller.failureMonitoring.getEndpoint().address ); } when( Void _ = wait( nextRequest ) ) { g_network->setCurrentTask(TaskDefaultDelay); @@ -111,7 +119,7 @@ ACTOR Future failureMonitorClientLoop( req.senderStatus = FailureStatus(false); request = controller.failureMonitoring.getReply( req, TaskFailureMonitor ); if(!controller.failureMonitoring.getEndpoint().isLocal()) - requestTimeout = delay( *pServerFailedTimeout, TaskFailureMonitor ); + requestTimeout = delay( fmState->serverFailedTimeout, TaskFailureMonitor ); } } } @@ -125,11 +133,10 @@ ACTOR Future failureMonitorClientLoop( ACTOR Future failureMonitorClient( Reference>> ci, bool trackMyStatus ) { state SimpleFailureMonitor* monitor = static_cast( &IFailureMonitor::failureMonitor() ); - state std::set knownAddrs; - state double serverFailedTimeout = CLIENT_KNOBS->FAILURE_TIMEOUT_DELAY; + state Reference fmState = Reference(new 
FailureMonitorClientState()); loop { - state Future client = ci->get().present() ? failureMonitorClientLoop(monitor, ci->get().get(), &serverFailedTimeout, trackMyStatus, &knownAddrs) : Void(); + state Future client = ci->get().present() ? failureMonitorClientLoop(monitor, ci->get().get(), fmState, trackMyStatus) : Void(); Void _ = wait( ci->onChange() ); } } \ No newline at end of file diff --git a/fdbclient/ManagementAPI.actor.cpp b/fdbclient/ManagementAPI.actor.cpp index e69ab9859e..7aeabaa445 100644 --- a/fdbclient/ManagementAPI.actor.cpp +++ b/fdbclient/ManagementAPI.actor.cpp @@ -87,7 +87,7 @@ std::map configForToken( std::string const& mode ) { return out; } - std::string redundancy, log_replicas, log_recovery_anti_quorum, dc="1", minDC="1"; + std::string redundancy, log_replicas; IRepPolicyRef storagePolicy; IRepPolicyRef tLogPolicy; @@ -95,36 +95,23 @@ std::map configForToken( std::string const& mode ) { if (mode == "single") { redundancy="1"; log_replicas="1"; - log_recovery_anti_quorum="0"; storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyOne()); - } else if(mode == "double") { + } else if(mode == "double" || mode == "fast_recovery_double") { redundancy="2"; log_replicas="2"; - log_recovery_anti_quorum="0"; storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne()))); - } else if(mode == "triple") { + } else if(mode == "triple" || mode == "fast_recovery_triple") { redundancy="3"; log_replicas="3"; - log_recovery_anti_quorum="0"; storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne()))); - } else if(mode == "fast_recovery_double") { - redundancy="2"; - log_replicas="3"; - log_recovery_anti_quorum="1"; - storagePolicy = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne()))); - tLogPolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne()))); - } else if(mode == "fast_recovery_triple") { - redundancy="3"; - log_replicas="4"; - log_recovery_anti_quorum="1"; - storagePolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne()))); - tLogPolicy = IRepPolicyRef(new PolicyAcross(4, "zoneid", IRepPolicyRef(new PolicyOne()))); } else if(mode == "two_datacenter") { - redundancy="3"; log_replicas="3"; log_recovery_anti_quorum="0"; dc="2"; minDC="1"; + redundancy="3"; + log_replicas="3"; storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne()))); } else if(mode == "three_datacenter") { - redundancy="3"; log_replicas="3"; log_recovery_anti_quorum="0"; dc="3"; minDC="2"; + redundancy="3"; + log_replicas="3"; storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAnd({ IRepPolicyRef(new PolicyAcross(3, "dcid", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne()))) @@ -132,7 +119,6 @@ std::map configForToken( std::string const& mode ) { } else if(mode == "three_data_hall") { redundancy="3"; log_replicas="4"; - log_recovery_anti_quorum="0"; storagePolicy = IRepPolicyRef(new PolicyAcross(3, "data_hall", IRepPolicyRef(new PolicyOne()))); tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "data_hall", IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne()))) @@ -144,9 +130,6 @@ std::map configForToken( std::string const& mode ) { out[p+"storage_quorum"] = redundancy; out[p+"log_replicas"] = log_replicas; out[p+"log_anti_quorum"] = "0"; - out[p+"log_recovery_anti_quorum"] = log_recovery_anti_quorum; - 
out[p+"replica_datacenters"] = dc; - out[p+"min_replica_datacenters"] = minDC; BinaryWriter policyWriter(IncludeVersion()); serializeReplicationPolicy(policyWriter, storagePolicy); @@ -214,9 +197,7 @@ ConfigurationResult::Type buildConfiguration( std::string const& configMode, std bool isCompleteConfiguration( std::map const& options ) { std::string p = configKeysPrefix.toString(); - return options.count( p+"min_replica_datacenters" ) == 1 && - options.count( p+"replica_datacenters" ) == 1 && - options.count( p+"log_replicas" ) == 1 && + return options.count( p+"log_replicas" ) == 1 && options.count( p+"log_anti_quorum" ) == 1 && options.count( p+"storage_quorum" ) == 1 && options.count( p+"storage_replicas" ) == 1 && @@ -307,10 +288,10 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) { result.auto_replication = "double"; storage_replication = 2; log_replication = 2; - } else if( result.old_replication == "double" ) { + } else if( result.old_replication == "double" || result.old_replication == "fast_recovery_double" ) { storage_replication = 2; log_replication = 2; - } else if( result.old_replication == "triple" ) { + } else if( result.old_replication == "triple" || result.old_replication == "fast_recovery_triple" ) { storage_replication = 3; log_replication = 3; } else if( result.old_replication == "two_datacenter" ) { @@ -319,12 +300,6 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) { } else if( result.old_replication == "three_datacenter" ) { storage_replication = 3; log_replication = 3; - } else if( result.old_replication == "fast_recovery_double" ) { - storage_replication = 2; - log_replication = 3; - } else if( result.old_replication == "fast_recovery_triple" ) { - storage_replication = 3; - log_replication = 4; } else return ConfigureAutoResult(); @@ -705,7 +680,9 @@ ACTOR Future changeQuorum( Database cx, ReferenceisSimulated()) { for(int i = 0; i < (desiredCoordinators.size()/2)+1; i++) { - g_simulator.protectedAddresses.insert( desiredCoordinators[i] ); + auto address = NetworkAddress(desiredCoordinators[i].ip,desiredCoordinators[i].port,true,false); + g_simulator.protectedAddresses.insert(address); + TraceEvent("ProtectCoordinator").detail("Address", address).backtrace(); } } @@ -1067,6 +1044,39 @@ ACTOR Future> getExcludedServers( Database cx ) { } } +ACTOR Future setDDMode( Database cx, int mode ) { + state Transaction tr(cx); + state int oldMode = -1; + state BinaryWriter wr(Unversioned()); + wr << mode; + + loop { + try { + Optional old = wait( tr.get( dataDistributionModeKey ) ); + if (oldMode < 0) { + oldMode = 1; + if (old.present()) { + BinaryReader rd(old.get(), Unversioned()); + rd >> oldMode; + } + } + if (!mode) { + BinaryWriter wrMyOwner(Unversioned()); + wrMyOwner << dataDistributionModeLock; + tr.set( moveKeysLockOwnerKey, wrMyOwner.toStringRef() ); + } + + tr.set( dataDistributionModeKey, wr.toStringRef() ); + + Void _ = wait( tr.commit() ); + return oldMode; + } catch (Error& e) { + TraceEvent("setDDModeRetrying").error(e); + Void _ = wait (tr.onError(e)); + } + } +} + ACTOR Future waitForExcludedServers( Database cx, vector excl ) { state std::set exclusions( excl.begin(), excl.end() ); diff --git a/fdbclient/ManagementAPI.h b/fdbclient/ManagementAPI.h index e3cf772b49..590ddd8c1a 100644 --- a/fdbclient/ManagementAPI.h +++ b/fdbclient/ManagementAPI.h @@ -153,6 +153,8 @@ Future unlockDatabase( Database const& cx, UID const& id ); Future checkDatabaseLock( Transaction* const& tr, UID const& id ); Future checkDatabaseLock( 
Reference const& tr, UID const& id ); +Future setDDMode( Database const& cx, int const& mode ); + // Gets the cluster connection string Future> getCoordinators( Database const& cx ); #endif \ No newline at end of file diff --git a/fdbclient/MasterProxyInterface.h b/fdbclient/MasterProxyInterface.h index d501e89fbb..b848087d95 100644 --- a/fdbclient/MasterProxyInterface.h +++ b/fdbclient/MasterProxyInterface.h @@ -33,7 +33,7 @@ struct MasterProxyInterface { RequestStream< struct CommitTransactionRequest > commit; RequestStream< struct GetReadVersionRequest > getConsistentReadVersion; // Returns a version which (1) is committed, and (2) is >= the latest version reported committed (by a commit response) when this request was sent // (at some point between when this request is sent and when its response is received, the latest version reported committed) - RequestStream< ReplyPromise> > getKeyServersLocations; + RequestStream< ReplyPromise>>> > getKeyServersLocations; RequestStream< struct GetStorageServerRejoinInfoRequest > getStorageServerRejoinInfo; RequestStream> waitFailure; diff --git a/fdbserver/MetricLogger.actor.cpp b/fdbclient/MetricLogger.actor.cpp similarity index 100% rename from fdbserver/MetricLogger.actor.cpp rename to fdbclient/MetricLogger.actor.cpp diff --git a/fdbclient/MetricLogger.h b/fdbclient/MetricLogger.h new file mode 100755 index 0000000000..96720c363c --- /dev/null +++ b/fdbclient/MetricLogger.h @@ -0,0 +1,25 @@ +/* + * MetricLogger.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "NativeAPI.h" + +Future runMetrics( Future const& fcx, Key const& metricsPrefix ); diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 230de8486f..2f2fefee6d 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -602,7 +602,7 @@ Reference DatabaseContext::setCachedLocation( const KeyRangeRef& k attempts++; auto r = locationCache.randomRange(); Key begin = r.begin(), end = r.end(); // insert invalidates r, so can't be passed a mere reference into it - if( begin >= keyServersPrefix ) + if( begin >= keyServersPrefix && attempts > maxEvictionAttempts / 2) continue; locationCache.insert( KeyRangeRef(begin, end), Reference() ); } @@ -875,7 +875,7 @@ void setupNetwork(uint64_t transportId, bool useMetrics) { if (!networkOptions.logClientInfo.present()) networkOptions.logClientInfo = true; - g_network = newNet2(NetworkAddress(), false, useMetrics); + g_network = newNet2(NetworkAddress(), false, useMetrics || networkOptions.traceDirectory.present()); FlowTransport::createInstance(transportId); Net2FileSystem::newFileSystem(); @@ -1102,22 +1102,33 @@ ACTOR Future< pair> > getKeyLocation( Database state vector serverInterfaces; state KeyRangeRef range; - + // We assume that not only /FF/keyServers but /FF/serverList is present on the keyServersLocations since we now need both of them to terminate our search. Currently this is guaranteed because nothing after /FF/keyServers is split. if ( ( key.startsWith( serverListPrefix) && (!isBackward || key.size() > serverListPrefix.size()) ) || ( key.startsWith( keyServersPrefix ) && (!isBackward || key.size() > keyServersPrefix.size()) )) { if( info.debugID.present() ) - g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.Before"); + g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.Before"); loop { choose { when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {} - when ( vector s = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getKeyServersLocations, ReplyPromise>(), info.taskID ) ) ) { + when ( vector>> keyServersShards = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getKeyServersLocations, ReplyPromise>>>(), info.taskID ) ) ) { if( info.debugID.present() ) g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.After"); - ASSERT( s.size() ); // There should always be storage servers, except on version 0 which should not get to this function - range = KeyRangeRef( keyServersPrefix, allKeys.end ); - serverInterfaces = s; - break; + ASSERT( keyServersShards.size() ); // There should always be storage servers, except on version 0 which should not get to this function + + Reference cachedLocation; + for (pair> keyServersShard : keyServersShards) { + auto locationInfo = cx->setCachedLocation(keyServersShard.first, keyServersShard.second); + + if (isBackward ? (keyServersShard.first.begin < key && keyServersShard.first.end >= key) : keyServersShard.first.contains(key)) { + range = keyServersShard.first; + cachedLocation = locationInfo; + } + } + + ASSERT(isBackward ? 
(range.begin < key && range.end >= key) : range.contains(key)); + + return make_pair(range, cachedLocation); } } } @@ -1654,6 +1665,15 @@ Future resolveKey( Database const& cx, KeySelector const& key, Version cons ACTOR Future> getRangeFallback( Database cx, Version version, KeySelector begin, KeySelector end, GetRangeLimits limits, bool reverse, TransactionInfo info ) { + if(version == latestVersion) { + state Transaction transaction(cx); + transaction.setOption(FDBTransactionOptions::CAUSAL_READ_RISKY); + transaction.setOption(FDBTransactionOptions::LOCK_AWARE); + transaction.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); + Version ver = wait( transaction.getReadVersion() ); + version = ver; + } + Future fb = resolveKey(cx, begin, version, info); state Future fe = resolveKey(cx, end, version, info); @@ -1849,15 +1869,8 @@ ACTOR Future> getRange( Database cx, Future cx->invalidateCache( beginServer.second ); if (e.code() == error_code_wrong_shard_server) { - if (version == latestVersion) { - // latestVersion queries are only for keyServersPrefix/*, which shard is guaranteed not to split, - // so we should always be able to use the fast path--try again - TEST(true); //Latest version retry fast path - TraceEvent("LatestVersionRetryFastPath").detail("KeyBegin", printable(begin.getKey())).detail("KeyEnd", printable(end.getKey())); - } else { Standalone result = wait( getRangeFallback(cx, version, originalBegin, originalEnd, originalLimits, reverse, info ) ); return result; - } } Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID)); @@ -2424,6 +2437,7 @@ ACTOR static Future commitDummyTransaction( Database cx, KeyRange range, T tr.info.taskID = info.taskID; tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS ); tr.setOption( FDBTransactionOptions::CAUSAL_WRITE_RISKY ); + tr.setOption( FDBTransactionOptions::LOCK_AWARE ); tr.addReadConflictRange(range); tr.addWriteConflictRange(range); Void _ = wait( tr.commit() ); diff --git a/flow/Notified.h b/fdbclient/Notified.h similarity index 96% rename from flow/Notified.h rename to fdbclient/Notified.h index 4496b88e11..fd32476a38 100644 --- a/flow/Notified.h +++ b/fdbclient/Notified.h @@ -18,11 +18,11 @@ * limitations under the License. 
*/ -#ifndef FLOW_NOTIFIED_H -#define FLOW_NOTIFIED_H +#ifndef FDBCLIENT_NOTIFIED_H +#define FDBCLIENT_NOTIFIED_H #pragma once -#include "fdbclient/FDBTypes.h" +#include "FDBTypes.h" #include "flow/TDMetric.actor.h" struct NotifiedVersion { @@ -78,4 +78,4 @@ private: VersionMetricHandle val; }; -#endif \ No newline at end of file +#endif diff --git a/fdbclient/SystemData.cpp b/fdbclient/SystemData.cpp index aea6279574..35d25c6364 100644 --- a/fdbclient/SystemData.cpp +++ b/fdbclient/SystemData.cpp @@ -33,7 +33,8 @@ const KeyRef afterAllKeys = LiteralStringRef("\xff\xff\x00"); const KeyRangeRef keyServersKeys( LiteralStringRef("\xff/keyServers/"), LiteralStringRef("\xff/keyServers0") ); const KeyRef keyServersPrefix = keyServersKeys.begin; const KeyRef keyServersEnd = keyServersKeys.end; -const KeyRef keyServersKeyServersKey = LiteralStringRef("\xff/keyServers/\xff/keyServers/"); +const KeyRangeRef keyServersKeyServersKeys ( LiteralStringRef("\xff/keyServers/\xff/keyServers/"), LiteralStringRef("\xff/keyServers/\xff/keyServers0")); +const KeyRef keyServersKeyServersKey = keyServersKeyServersKeys.begin; const Key keyServersKey( const KeyRef& k ) { return k.withPrefix( keyServersPrefix ); diff --git a/fdbclient/SystemData.h b/fdbclient/SystemData.h index 395f3ac121..22f92fd55d 100644 --- a/fdbclient/SystemData.h +++ b/fdbclient/SystemData.h @@ -34,7 +34,7 @@ extern const KeyRangeRef allKeys; // '' to systemKeys.end extern const KeyRef afterAllKeys; // "\xff/keyServers/[[begin]]" := "[[vector, vector]]" -extern const KeyRangeRef keyServersKeys; +extern const KeyRangeRef keyServersKeys, keyServersKeyServersKeys; extern const KeyRef keyServersPrefix, keyServersEnd, keyServersKeyServersKey; const Key keyServersKey( const KeyRef& k ); const KeyRef keyServersKey( const KeyRef& k, Arena& arena ); diff --git a/fdbclient/fdbclient.vcxproj b/fdbclient/fdbclient.vcxproj old mode 100644 new mode 100755 index f4b0723505..4a43526153 --- a/fdbclient/fdbclient.vcxproj +++ b/fdbclient/fdbclient.vcxproj @@ -38,6 +38,8 @@ false false + + @@ -57,6 +59,7 @@ + @@ -201,4 +204,4 @@ - \ No newline at end of file + diff --git a/fdbclient/vexillographer/fdb.options b/fdbclient/vexillographer/fdb.options index 6e70dd9eec..c194784a52 100644 --- a/fdbclient/vexillographer/fdb.options +++ b/fdbclient/vexillographer/fdb.options @@ -94,7 +94,7 @@ description is not currently required but encouraged.