Merge branch 'master' into feature-remote-logs
# Conflicts: # fdbserver/ClusterController.actor.cpp # fdbserver/DataDistribution.actor.cpp # fdbserver/OldTLogServer.actor.cpp # fdbserver/TLogServer.actor.cpp # fdbserver/WorkerInterface.h # flow/Net2.actor.cpp
This commit is contained in:
commit
76e7988663
2
Makefile
2
Makefile
|
@ -47,7 +47,7 @@ else ifeq ($(PLATFORM),Darwin)
|
|||
CXX := /usr/bin/clang
|
||||
|
||||
CFLAGS += -mmacosx-version-min=10.7 -stdlib=libc++
|
||||
CXXFLAGS += -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
|
||||
CXXFLAGS += -mmacosx-version-min=10.7 -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
|
||||
|
||||
.LIBPATTERNS := lib%.dylib lib%.a
|
||||
|
||||
|
|
|
@ -63,6 +63,6 @@ testers = {
|
|||
'java_async' : Tester('java', _java_cmd + 'AsyncStackTester', 2040, 500, MAX_API_VERSION),
|
||||
'java_completable' : Tester('java', _java_completable_cmd + 'StackTester', 2040, 500, MAX_API_VERSION),
|
||||
'java_completable_async' : Tester('java', _java_completable_cmd + 'AsyncStackTester', 2040, 500, MAX_API_VERSION),
|
||||
'go' : Tester('go', _absolute_path('go/bin/_stacktester'), 63, 200, MAX_API_VERSION),
|
||||
'go' : Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION),
|
||||
'flow' : Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION),
|
||||
}
|
||||
|
|
|
@ -23,14 +23,18 @@
|
|||
fdb_c_CFLAGS := $(fdbclient_CFLAGS)
|
||||
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
|
||||
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a
|
||||
fdb_c_tests_LIBS := -Llib -lfdb_c
|
||||
fdb_c_tests_HEADERS := -Ibindings/c
|
||||
|
||||
ifeq ($(PLATFORM),linux)
|
||||
fdb_c_LIBS += lib/libstdc++.a -lm -lpthread -lrt -ldl
|
||||
fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete
|
||||
fdb_c_tests_LIBS += -lpthread
|
||||
endif
|
||||
|
||||
ifeq ($(PLATFORM),osx)
|
||||
fdb_c_LDFLAGS += -lc++ -Xlinker -exported_symbols_list -Xlinker bindings/c/fdb_c.symbols
|
||||
fdb_c_tests_LIBS += -lpthread
|
||||
|
||||
lib/libfdb_c.dylib: bindings/c/fdb_c.symbols
|
||||
|
||||
|
@ -74,3 +78,24 @@ fdb_c_BUILD_SOURCES += bindings/c/fdb_c.g.S
|
|||
bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexillographer/fdb.options $(ALL_MAKEFILES)
|
||||
@echo "Building $@"
|
||||
@$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options c $@
|
||||
|
||||
bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
|
||||
@echo "Compiling fdb_c_performance_test"
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_LIBS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c
|
||||
|
||||
bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
|
||||
@echo "Compiling fdb_c_ryw_benchmark"
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_LIBS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c
|
||||
|
||||
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
|
||||
@echo "Packaging $@"
|
||||
@rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM)
|
||||
@mkdir -p packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
|
||||
@cp bin/fdb_c_performance_test packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
|
||||
@cp bin/fdb_c_ryw_benchmark packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
|
||||
@tar -C packages -czvf $@ fdb-c-tests-$(VERSION)-$(PLATFORM) > /dev/null
|
||||
@rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM)
|
||||
|
||||
fdb_c_tests: packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz
|
||||
|
||||
packages: fdb_c_tests
|
||||
|
|
|
@ -0,0 +1,623 @@
|
|||
/*
|
||||
* performance_test.c
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "test.h"
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <foundationdb/fdb_c_options.g.h>
|
||||
|
||||
#include <stdio.h>
|
||||
#include <pthread.h>
|
||||
|
||||
pthread_t netThread;
|
||||
|
||||
int numKeys = 1000000;
|
||||
int keySize = 16;
|
||||
uint8_t** keys = NULL;
|
||||
int valueSize = 100;
|
||||
uint8_t *valueStr = NULL;
|
||||
|
||||
fdb_error_t waitError(FDBFuture *f) {
|
||||
fdb_error_t blockError = fdb_future_block_until_ready(f);
|
||||
if(!blockError) {
|
||||
return fdb_future_get_error(f);
|
||||
} else {
|
||||
return blockError;
|
||||
}
|
||||
}
|
||||
|
||||
struct RunResult run(struct ResultSet *rs, FDBDatabase *db, struct RunResult (*func)(struct ResultSet*, FDBTransaction*)) {
|
||||
FDBTransaction *tr = NULL;
|
||||
checkError(fdb_database_create_transaction(db, &tr), "create transaction", rs);
|
||||
fdb_error_t e = fdb_database_create_transaction(db, &tr);
|
||||
checkError(e, "create transaction", rs);
|
||||
|
||||
while(1) {
|
||||
struct RunResult r = func(rs, tr);
|
||||
e = r.e;
|
||||
if(!e) {
|
||||
FDBFuture *f = fdb_transaction_commit(tr);
|
||||
e = waitError(f);
|
||||
fdb_future_destroy(f);
|
||||
}
|
||||
|
||||
if(e) {
|
||||
FDBFuture *f = fdb_transaction_on_error(tr, e);
|
||||
fdb_error_t retryE = waitError(f);
|
||||
fdb_future_destroy(f);
|
||||
if (retryE) {
|
||||
return (struct RunResult) {0, retryE};
|
||||
}
|
||||
} else {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
return RES(0, 4100); // internal_error ; we should never get here
|
||||
}
|
||||
|
||||
int runTest(struct RunResult (*testFxn)(struct ResultSet*, FDBTransaction*), FDBDatabase *db, struct ResultSet *rs, const char *kpiName) {
|
||||
int numRuns = 25;
|
||||
int *results = malloc(sizeof(int)*numRuns);
|
||||
int i = 0;
|
||||
for(; i < numRuns; ++i) {
|
||||
struct RunResult res = run(rs, db, testFxn);
|
||||
if(res.e) {
|
||||
logError(res.e, kpiName, rs);
|
||||
free(results);
|
||||
return 0;
|
||||
}
|
||||
results[i] = res.res;
|
||||
if(results[i] < 0) {
|
||||
free(results);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int result = median(results, numRuns);
|
||||
free(results);
|
||||
|
||||
addKpi(rs, kpiName, result, "keys/s");
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
int runTestDb(struct RunResult (*testFxn)(struct ResultSet*, FDBDatabase*), FDBDatabase *db, struct ResultSet *rs, const char *kpiName) {
|
||||
int numRuns = 25;
|
||||
int *results = malloc(sizeof(int)*numRuns);
|
||||
int i = 0;
|
||||
for(; i < numRuns; ++i) {
|
||||
struct RunResult res = testFxn(rs, db);
|
||||
if(res.e) {
|
||||
logError(res.e, kpiName, rs);
|
||||
free(results);
|
||||
return 0;
|
||||
}
|
||||
results[i] = res.res;
|
||||
if(results[i] < 0) {
|
||||
free(results);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int result = median(results, numRuns);
|
||||
free(results);
|
||||
|
||||
addKpi(rs, kpiName, result, "keys/s");
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
struct RunResult clearAll(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_transaction_clear_range(tr, (uint8_t*)"", 0, (uint8_t*)"\xff", 1);
|
||||
return RES(0, 0);
|
||||
}
|
||||
|
||||
uint32_t start = 0;
|
||||
uint32_t stop = 0;
|
||||
struct RunResult insertRange(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
int i;
|
||||
for(i = start; i < stop; i++) {
|
||||
fdb_transaction_set(tr, keys[i], keySize, valueStr, valueSize);
|
||||
}
|
||||
return RES(0, 0);
|
||||
}
|
||||
|
||||
void insertData(struct ResultSet *rs, FDBDatabase *db) {
|
||||
checkError(run(rs, db, &clearAll).e, "clearing database", rs);
|
||||
|
||||
// TODO: Do this asynchronously.
|
||||
start = 0;
|
||||
while(start < numKeys) {
|
||||
stop = start + 1000;
|
||||
if(stop > numKeys) stop = numKeys;
|
||||
checkError(run(rs, db, &insertRange).e, "inserting data range", rs);
|
||||
start = stop;
|
||||
}
|
||||
}
|
||||
|
||||
fdb_error_t setRetryLimit(struct ResultSet *rs, FDBTransaction *tr, uint64_t limit) {
|
||||
return fdb_transaction_set_option(tr, FDB_TR_OPTION_RETRY_LIMIT, (const uint8_t*)&limit, sizeof(uint64_t));
|
||||
}
|
||||
|
||||
uint32_t FUTURE_LATENCY_COUNT = 100000;
|
||||
const char *FUTURE_LATENCY_KPI = "C future throughput (local client)";
|
||||
struct RunResult futureLatency(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
FDBFuture *f = fdb_transaction_get_read_version(tr);
|
||||
e = waitError(f);
|
||||
fdb_future_destroy(f);
|
||||
maybeLogError(e, "getting initial read version", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
double start = getTime();
|
||||
int i;
|
||||
for(i = 0; i < FUTURE_LATENCY_COUNT; i++) {
|
||||
FDBFuture *f = fdb_transaction_get_read_version(tr);
|
||||
e = waitError(f);
|
||||
fdb_future_destroy(f);
|
||||
maybeLogError(e, "getting read version", rs);
|
||||
if(e) return RES(0, e);
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
return RES(FUTURE_LATENCY_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t CLEAR_COUNT = 100000;
|
||||
const char *CLEAR_KPI = "C clear throughput (local client)";
|
||||
struct RunResult clear(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
double start = getTime();
|
||||
int i;
|
||||
for(i = 0; i < CLEAR_COUNT; i++) {
|
||||
int k = ((uint64_t)rand()) % numKeys;
|
||||
fdb_transaction_clear(tr, keys[k], keySize);
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
fdb_transaction_reset(tr); // Don't actually clear things.
|
||||
return RES(CLEAR_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t CLEAR_RANGE_COUNT = 100000;
|
||||
const char *CLEAR_RANGE_KPI = "C clear range throughput (local client)";
|
||||
struct RunResult clearRange(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
double start = getTime();
|
||||
int i;
|
||||
for(i = 0; i < CLEAR_RANGE_COUNT; i++) {
|
||||
int k = ((uint64_t)rand()) % (numKeys - 1);
|
||||
fdb_transaction_clear_range(tr, keys[k], keySize, keys[k+1], keySize);
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
fdb_transaction_reset(tr); // Don't actually clear things.
|
||||
return RES(CLEAR_RANGE_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t SET_COUNT = 100000;
|
||||
const char *SET_KPI = "C set throughput (local client)";
|
||||
struct RunResult set(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
double start = getTime();
|
||||
int i;
|
||||
for(i = 0; i < SET_COUNT; i++) {
|
||||
int k = ((uint64_t)rand()) % numKeys;
|
||||
fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize);
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
fdb_transaction_reset(tr); // Don't actually set things.
|
||||
return RES(SET_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t PARALLEL_GET_COUNT = 10000;
|
||||
const char *PARALLEL_GET_KPI = "C parallel get throughput (local client)";
|
||||
struct RunResult parallelGet(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
FDBFuture **futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * PARALLEL_GET_COUNT);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
int i;
|
||||
for(i = 0; i < PARALLEL_GET_COUNT; i++) {
|
||||
int k = ((uint64_t)rand()) % numKeys;
|
||||
futures[i] = fdb_transaction_get(tr, keys[k], keySize, 0);
|
||||
}
|
||||
|
||||
fdb_bool_t present;
|
||||
uint8_t const *outValue;
|
||||
int outValueLength;
|
||||
|
||||
for(i = 0; i < PARALLEL_GET_COUNT; i++) {
|
||||
e = maybeLogError(fdb_future_block_until_ready(futures[i]), "waiting for get future", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(futures[i]);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(futures[i]);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
fdb_future_destroy(futures[i]);
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
free(futures);
|
||||
return RES(PARALLEL_GET_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t ALTERNATING_GET_SET_COUNT = 2000;
|
||||
const char *ALTERNATING_GET_SET_KPI = "C alternating get set throughput (local client)";
|
||||
struct RunResult alternatingGetSet(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
FDBFuture **futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * ALTERNATING_GET_SET_COUNT);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
int i;
|
||||
for(i = 0; i < ALTERNATING_GET_SET_COUNT; i++) {
|
||||
int k = ((uint64_t)rand()) % numKeys;
|
||||
fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize);
|
||||
futures[i] = fdb_transaction_get(tr, keys[k], keySize, 0);
|
||||
}
|
||||
|
||||
fdb_bool_t present;
|
||||
uint8_t const *outValue;
|
||||
int outValueLength;
|
||||
|
||||
for(i = 0; i < ALTERNATING_GET_SET_COUNT; i++) {
|
||||
e = maybeLogError(fdb_future_block_until_ready(futures[i]), "waiting for get future", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(futures[i]);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(futures[i]);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
fdb_future_destroy(futures[i]);
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
free(futures);
|
||||
return RES(ALTERNATING_GET_SET_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t SERIAL_GET_COUNT = 2000;
|
||||
const char *SERIAL_GET_KPI = "C serial get throughput (local client)";
|
||||
struct RunResult serialGet(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
int i;
|
||||
uint32_t *keyIndices = (uint32_t*)malloc((sizeof(uint32_t)) * SERIAL_GET_COUNT);
|
||||
|
||||
if(SERIAL_GET_COUNT > numKeys/2) {
|
||||
for(i = 0; i < SERIAL_GET_COUNT; i++) {
|
||||
keyIndices[i] = ((uint64_t)rand()) % numKeys;
|
||||
}
|
||||
} else {
|
||||
for(i = 0; i < SERIAL_GET_COUNT; i++) {
|
||||
while(1) {
|
||||
// Yes, this is a linear scan. This happens outside
|
||||
// the part we are measuring.
|
||||
uint32_t index = ((uint64_t)rand()) % numKeys;
|
||||
int j;
|
||||
fdb_bool_t found = 0;
|
||||
for(j = 0; j < i; j++) {
|
||||
if(keyIndices[j] == index) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(!found) {
|
||||
keyIndices[i] = index;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
double start = getTime();
|
||||
|
||||
fdb_bool_t present;
|
||||
uint8_t const *outValue;
|
||||
int outValueLength;
|
||||
|
||||
for(i = 0; i < SERIAL_GET_COUNT; i++) {
|
||||
FDBFuture *f = fdb_transaction_get(tr, keys[keyIndices[i]], keySize, 0);
|
||||
fdb_error_t e = maybeLogError(fdb_future_block_until_ready(f), "getting key in serial", rs);
|
||||
if(e) {
|
||||
free(keyIndices);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_value(f, &present, &outValue, &outValueLength), "getting future value", rs);
|
||||
fdb_future_destroy(f);
|
||||
if(e) {
|
||||
free(keyIndices);
|
||||
return RES(0, e);
|
||||
}
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
free(keyIndices);
|
||||
return RES(SERIAL_GET_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t GET_RANGE_COUNT = 100000;
|
||||
const char *GET_RANGE_KPI = "C get range throughput (local client)";
|
||||
struct RunResult getRange(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
uint32_t startKey = ((uint64_t)rand()) % (numKeys - GET_RANGE_COUNT - 1);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
const FDBKeyValue *outKv;
|
||||
int outCount;
|
||||
fdb_bool_t outMore = 1;
|
||||
int totalOut = 0;
|
||||
int iteration = 0;
|
||||
|
||||
FDBFuture *f = fdb_transaction_get_range(tr,
|
||||
keys[startKey], keySize, 1, 0,
|
||||
keys[startKey + GET_RANGE_COUNT], keySize, 1, 0,
|
||||
0, 0,
|
||||
FDB_STREAMING_MODE_WANT_ALL, ++iteration, 0, 0);
|
||||
|
||||
while(outMore) {
|
||||
e = maybeLogError(fdb_future_block_until_ready(f), "getting range", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading range array", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
totalOut += outCount;
|
||||
|
||||
if(outMore) {
|
||||
FDBFuture *f2 = fdb_transaction_get_range(tr,
|
||||
outKv[outCount - 1].key, outKv[outCount - 1].key_length, 1, 1,
|
||||
keys[startKey + GET_RANGE_COUNT], keySize, 1, 0,
|
||||
0, 0,
|
||||
FDB_STREAMING_MODE_WANT_ALL, ++iteration, 0, 0);
|
||||
fdb_future_destroy(f);
|
||||
f = f2;
|
||||
}
|
||||
}
|
||||
|
||||
if(totalOut != GET_RANGE_COUNT) {
|
||||
char *msg = (char*)malloc((sizeof(char)) * 200);
|
||||
sprintf(msg, "verifying out count (%d != %d)", totalOut, GET_RANGE_COUNT);
|
||||
logError(4100, msg, rs);
|
||||
free(msg);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
}
|
||||
if(outMore) {
|
||||
logError(4100, "verifying no more in range", rs);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
}
|
||||
fdb_future_destroy(f);
|
||||
|
||||
double end = getTime();
|
||||
|
||||
return RES(GET_RANGE_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t GET_KEY_COUNT = 2000;
|
||||
const char *GET_KEY_KPI = "C get key throughput (local client)";
|
||||
struct RunResult getKey(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
fdb_bool_t present;
|
||||
uint8_t const *outValue;
|
||||
int outValueLength;
|
||||
|
||||
int i;
|
||||
for(i = 0; i < GET_KEY_COUNT; i++) {
|
||||
int key = ((uint64_t)rand()) % numKeys;
|
||||
int offset = (((uint64_t)rand()) % 21) - 10;
|
||||
FDBFuture *f = fdb_transaction_get_key(tr, keys[key], keySize, 1, offset, 0);
|
||||
|
||||
e = maybeLogError(fdb_future_block_until_ready(f), "waiting for get key", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_value(f, &present, &outValue, &outValueLength), "getting future value", rs);
|
||||
fdb_future_destroy(f);
|
||||
if(e) {
|
||||
return RES(0, e);
|
||||
}
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
return RES(GET_KEY_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t GET_SINGLE_KEY_RANGE_COUNT = 2000;
|
||||
const char *GET_SINGLE_KEY_RANGE_KPI = "C get_single_key_range throughput (local client)";
|
||||
struct RunResult getSingleKeyRange(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
const FDBKeyValue *outKv;
|
||||
int outCount;
|
||||
fdb_bool_t outMore;
|
||||
|
||||
int i;
|
||||
for(i = 0; i < GET_SINGLE_KEY_RANGE_COUNT; i++) {
|
||||
int key = ((uint64_t)rand()) % (numKeys - 1);
|
||||
FDBFuture *f = fdb_transaction_get_range(tr,
|
||||
keys[key], keySize, 1, 0,
|
||||
keys[key + 1], keySize, 1, 0,
|
||||
2, 0,
|
||||
FDB_STREAMING_MODE_EXACT, 1, 0, 0);
|
||||
|
||||
e = maybeLogError(fdb_future_block_until_ready(f), "waiting for single key range", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading single key range array", rs);
|
||||
if(e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
if(outCount != 1) {
|
||||
logError(4100, "more than one key returned in single key range read", rs);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
}
|
||||
if(outMore) {
|
||||
logError(4100, "more keys to read in single key range read", rs);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
}
|
||||
|
||||
fdb_future_destroy(f);
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
return RES(GET_SINGLE_KEY_RANGE_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
struct RunResult singleKey(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
int k = ((uint64_t)rand()) % numKeys;
|
||||
fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize);
|
||||
return RES(0, 0);
|
||||
}
|
||||
|
||||
uint32_t WRITE_TRANSACTION_COUNT = 1000;
|
||||
const char *WRITE_TRANSACTION_KPI = "C write_transaction throughput (local client)";
|
||||
struct RunResult writeTransaction(struct ResultSet *rs, FDBDatabase *db) {
|
||||
double start = getTime();
|
||||
|
||||
int i;
|
||||
for(i = 0; i < WRITE_TRANSACTION_COUNT; i++) {
|
||||
struct RunResult res = run(rs, db, &singleKey);
|
||||
if(res.e) return res;
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
return RES(WRITE_TRANSACTION_COUNT/(end - start), 0);
|
||||
}
|
||||
|
||||
void runTests(struct ResultSet *rs) {
|
||||
FDBDatabase *db = openDatabase(rs, &netThread);
|
||||
|
||||
printf("Loading database...\n");
|
||||
insertData(rs, db);
|
||||
|
||||
printf("future_latency\n");
|
||||
runTest(&futureLatency, db, rs, FUTURE_LATENCY_KPI);
|
||||
|
||||
printf("clear\n");
|
||||
runTest(&clear, db, rs, CLEAR_KPI);
|
||||
|
||||
printf("clear_range\n");
|
||||
runTest(&clearRange, db, rs, CLEAR_RANGE_KPI);
|
||||
|
||||
printf("set\n");
|
||||
runTest(&set, db, rs, SET_KPI);
|
||||
|
||||
printf("parallel_get\n");
|
||||
runTest(¶llelGet, db, rs, PARALLEL_GET_KPI);
|
||||
|
||||
printf("alternating_get_set\n");
|
||||
runTest(&alternatingGetSet, db, rs, ALTERNATING_GET_SET_KPI);
|
||||
|
||||
printf("serial_get\n");
|
||||
runTest(&serialGet, db, rs, SERIAL_GET_KPI);
|
||||
|
||||
printf("get_range\n");
|
||||
runTest(&getRange, db, rs, GET_RANGE_KPI);
|
||||
|
||||
printf("get_key\n");
|
||||
runTest(&getKey, db, rs, GET_KEY_KPI);
|
||||
|
||||
printf("get_single_key_range\n");
|
||||
runTest(&getSingleKeyRange, db, rs, GET_SINGLE_KEY_RANGE_KPI);
|
||||
|
||||
printf("write_transaction\n");
|
||||
runTestDb(&writeTransaction, db, rs, WRITE_TRANSACTION_KPI);
|
||||
|
||||
fdb_database_destroy(db);
|
||||
fdb_stop_network();
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
srand(time(NULL));
|
||||
struct ResultSet *rs = newResultSet();
|
||||
checkError(fdb_select_api_version(500), "select API version", rs);
|
||||
printf("Running performance test at client version: %s\n", fdb_get_client_version());
|
||||
|
||||
valueStr = (uint8_t*)malloc((sizeof(uint8_t))*valueSize);
|
||||
int i;
|
||||
for(i = 0; i < valueSize; i++) {
|
||||
valueStr[i] = (uint8_t)'x';
|
||||
}
|
||||
|
||||
keys = generateKeys(numKeys, keySize);
|
||||
runTests(rs);
|
||||
writeResultSet(rs);
|
||||
|
||||
free(valueStr);
|
||||
freeResultSet(rs);
|
||||
freeKeys(keys, numKeys);
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,257 @@
|
|||
/*
|
||||
* ryw_benchmark.c
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "test.h"
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <foundationdb/fdb_c_options.g.h>
|
||||
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
|
||||
pthread_t netThread;
|
||||
|
||||
int numKeys = 10000;
|
||||
int keySize = 16;
|
||||
uint8_t** keys;
|
||||
|
||||
void insertData(FDBTransaction *tr) {
|
||||
fdb_transaction_clear_range(tr, (uint8_t*)"", 0, (uint8_t*)"\xff", 1);
|
||||
|
||||
uint8_t *v = (uint8_t*)"foo";
|
||||
uint32_t i;
|
||||
for(i = 0; i <= numKeys; ++i) {
|
||||
fdb_transaction_set(tr, keys[i], keySize, v, 3);
|
||||
}
|
||||
}
|
||||
|
||||
int runTest(int (*testFxn)(FDBTransaction*, struct ResultSet*), FDBTransaction *tr, struct ResultSet *rs, const char *kpiName) {
|
||||
int numRuns = 25;
|
||||
int *results = malloc(sizeof(int)*numRuns);
|
||||
int i = 0;
|
||||
for(; i < numRuns; ++i) {
|
||||
results[i] = testFxn(tr, rs);
|
||||
if(results[i] < 0) {
|
||||
free(results);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int result = median(results, numRuns);
|
||||
free(results);
|
||||
|
||||
addKpi(rs, kpiName, result, "keys/s");
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
int getSingle(FDBTransaction *tr, struct ResultSet *rs) {
|
||||
int present;
|
||||
uint8_t const *value;
|
||||
int length;
|
||||
int i;
|
||||
|
||||
double start = getTime();
|
||||
for(i = 0; i < numKeys; ++i) {
|
||||
FDBFuture *f = fdb_transaction_get(tr, keys[5001], keySize, 0);
|
||||
if(getError(fdb_future_block_until_ready(f), "GetSingle (block for get)", rs)) return -1;
|
||||
if(getError(fdb_future_get_value(f, &present, &value, &length), "GetSingle (get result)", rs)) return -1;
|
||||
fdb_future_destroy(f);
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
return numKeys / (end - start);
|
||||
}
|
||||
|
||||
int getManySequential(FDBTransaction *tr, struct ResultSet *rs) {
|
||||
int present;
|
||||
uint8_t const *value;
|
||||
int length;
|
||||
int i;
|
||||
|
||||
double start = getTime();
|
||||
for(i = 0; i < numKeys; ++i) {
|
||||
FDBFuture *f = fdb_transaction_get(tr, keys[i], keySize, 0);
|
||||
if(getError(fdb_future_block_until_ready(f), "GetManySequential (block for get)", rs)) return -1;
|
||||
if(getError(fdb_future_get_value(f, &present, &value, &length), "GetManySequential (get result)", rs)) return -1;
|
||||
fdb_future_destroy(f);
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
return numKeys / (end - start);
|
||||
}
|
||||
|
||||
int getRangeBasic(FDBTransaction *tr, struct ResultSet *rs) {
|
||||
int count;
|
||||
const FDBKeyValue *kvs;
|
||||
int more;
|
||||
int i;
|
||||
|
||||
double start = getTime();
|
||||
for(i = 0; i < 100; ++i) {
|
||||
FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0);
|
||||
|
||||
if(getError(fdb_future_block_until_ready(f), "GetRangeBasic (block for get range)", rs)) return -1;
|
||||
if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "GetRangeBasic (get range results)", rs)) return -1;
|
||||
|
||||
if(count != numKeys) {
|
||||
fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys);
|
||||
addError(rs, "GetRangeBasic bad count");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
return 100 * numKeys / (end - start);
|
||||
}
|
||||
|
||||
int singleClearGetRange(FDBTransaction *tr, struct ResultSet *rs) {
|
||||
int count;
|
||||
const FDBKeyValue *kvs;
|
||||
int more;
|
||||
int i;
|
||||
|
||||
for(i = 0; i < numKeys; i+=2) {
|
||||
fdb_transaction_clear(tr, keys[i], keySize);
|
||||
}
|
||||
|
||||
double start = getTime();
|
||||
for(i = 0; i < 100; ++i) {
|
||||
FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0);
|
||||
|
||||
if(getError(fdb_future_block_until_ready(f), "SingleClearGetRange (block for get range)", rs)) return -1;
|
||||
if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "SingleClearGetRange (get range results)", rs)) return -1;
|
||||
|
||||
fdb_future_destroy(f);
|
||||
|
||||
if(count != numKeys/2) {
|
||||
fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys);
|
||||
addError(rs, "SingleClearGetRange bad count");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
insertData(tr);
|
||||
return 100 * numKeys / 2 / (end - start);
|
||||
}
|
||||
|
||||
int clearRangeGetRange(FDBTransaction *tr, struct ResultSet *rs) {
|
||||
int count;
|
||||
const FDBKeyValue *kvs;
|
||||
int more;
|
||||
int i;
|
||||
|
||||
for(i = 0; i < numKeys; i+=4) {
|
||||
fdb_transaction_clear_range(tr, keys[i], keySize, keys[i+1], keySize);
|
||||
}
|
||||
|
||||
double start = getTime();
|
||||
for(i = 0; i < 100; ++i) {
|
||||
FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0);
|
||||
|
||||
if(getError(fdb_future_block_until_ready(f), "ClearRangeGetRange (block for get range)", rs)) return -1;
|
||||
if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "ClearRangeGetRange (get range results)", rs)) return -1;
|
||||
|
||||
fdb_future_destroy(f);
|
||||
|
||||
if(count != numKeys*3/4) {
|
||||
fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys*3/4);
|
||||
addError(rs, "ClearRangeGetRange bad count");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
insertData(tr);
|
||||
return 100 * numKeys * 3 / 4 / (end - start);
|
||||
}
|
||||
|
||||
int interleavedSetsGets(FDBTransaction *tr, struct ResultSet *rs) {
|
||||
int present;
|
||||
uint8_t const *value;
|
||||
int length;
|
||||
int i;
|
||||
|
||||
uint8_t *k = (uint8_t*)"foo";
|
||||
uint8_t v[10];
|
||||
int num = 1;
|
||||
|
||||
double start = getTime();
|
||||
sprintf((char*)v, "%d", num);
|
||||
fdb_transaction_set(tr, k, 3, v, strlen((char*)v));
|
||||
|
||||
for(i = 0; i < 10000; ++i) {
|
||||
FDBFuture *f = fdb_transaction_get(tr, k, 3, 0);
|
||||
if(getError(fdb_future_block_until_ready(f), "InterleavedSetsGets (block for get)", rs)) return -1;
|
||||
if(getError(fdb_future_get_value(f, &present, &value, &length), "InterleavedSetsGets (get result)", rs)) return -1;
|
||||
fdb_future_destroy(f);
|
||||
|
||||
sprintf((char*)v, "%d", ++num);
|
||||
fdb_transaction_set(tr, k, 3, v, strlen((char*)v));
|
||||
}
|
||||
double end = getTime();
|
||||
|
||||
return 10000 / (end - start);
|
||||
}
|
||||
|
||||
/* Opens the database, grabs a read version (forcing the transaction to be
 * usable), primes it with insertData, then runs each cached-read benchmark,
 * recording a KPI per test into rs.  Errors are fatal via checkError.
 * NOTE(review): tr is never destroyed with fdb_transaction_destroy — confirm
 * this is intentional (process exits shortly after). */
void runTests(struct ResultSet *rs) {
	FDBDatabase *db = openDatabase(rs, &netThread);  /* netThread is a file-scope global defined earlier */

	FDBTransaction *tr;
	checkError(fdb_database_create_transaction(db, &tr), "create transaction", rs);

	FDBFuture *f = fdb_transaction_get_read_version(tr);
	checkError(fdb_future_block_until_ready(f), "block for read version", rs);

	int64_t version;
	checkError(fdb_future_get_version(f, &version), "get version", rs);
	fdb_future_destroy(f);

	insertData(tr);

	runTest(&getSingle, tr, rs, "C: get single cached value throughput");
	runTest(&getManySequential, tr, rs, "C: get sequential cached values throughput");
	runTest(&getRangeBasic, tr, rs, "C: get range cached values throughput");
	runTest(&singleClearGetRange, tr, rs, "C: get range cached values with clears throughput");
	runTest(&clearRangeGetRange, tr, rs, "C: get range cached values with clear ranges throughput");
	runTest(&interleavedSetsGets, tr, rs, "C: interleaved sets and gets on a single key throughput");

	fdb_database_destroy(db);
	fdb_stop_network();
}
|
||||
|
||||
/* Entry point: seeds the RNG (used only for the results file name), selects
 * FDB API version 500, generates the benchmark key set, runs all tests, and
 * writes the JSON result set to disk.
 * keys/numKeys/keySize are file-scope globals defined earlier in this file. */
int main(int argc, char **argv) {
	srand(time(NULL));
	struct ResultSet *rs = newResultSet();
	checkError(fdb_select_api_version(500), "select API version", rs);
	printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());

	keys = generateKeys(numKeys, keySize);
	runTests(rs);
	writeResultSet(rs);
	freeResultSet(rs);
	freeKeys(keys, numKeys);  /* NOTE(review): generateKeys makes numKeys+1 entries; the sentinel key leaks */

	return 0;
}
|
||||
|
|
@ -0,0 +1,257 @@
|
|||
/*
|
||||
* test.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <sys/time.h>
|
||||
#include <arpa/inet.h>
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#ifndef FDB_API_VERSION
|
||||
#define FDB_API_VERSION 500
|
||||
#endif
|
||||
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <foundationdb/fdb_c_options.g.h>
|
||||
|
||||
/* Returns the current wall-clock time in seconds (microsecond resolution)
 * as a double.  Suitable only for measuring short intervals in this
 * benchmark; not monotonic under clock adjustments. */
double getTime() {
	/* tv is a plain local now; the original 'static' made the function
	 * non-reentrant for no benefit (the value was overwritten every call). */
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_usec/1000000.0 + tv.tv_sec;
}
|
||||
|
||||
/* Allocates and writes a zero-padded decimal key of exactly keySize digits
 * into *dest (e.g. key=42, keySize=5 -> "00042").  Caller frees.
 * Assumes key fits in keySize digits; wider values would overflow. */
void writeKey(uint8_t **dest, int key, int keySize) {
	/* keySize + 1: sprintf emits keySize digits plus a NUL terminator.
	 * The original allocation of exactly keySize bytes overflowed by one. */
	*dest = (uint8_t*)malloc((sizeof(uint8_t))*(keySize + 1));
	sprintf((char*)*dest, "%0*d", keySize, key);
}
|
||||
|
||||
/* Builds numKeys+1 zero-padded decimal keys of width keySize
 * ("000..0" .. numKeys); the extra entry acts as an end sentinel for
 * range reads.  Release with freeKeys.
 * NOTE(review): freeKeys(keys, numKeys) frees only the first numKeys
 * entries, so the sentinel buffer leaks — confirm intended. */
uint8_t **generateKeys(int numKeys, int keySize) {
	uint8_t **result = (uint8_t**)malloc(sizeof(uint8_t*) * (numKeys + 1));
	for(int n = 0; n <= numKeys; ++n) {
		writeKey(&result[n], n, keySize);
	}
	return result;
}
|
||||
/* Releases an array produced by generateKeys: each key buffer first,
 * then the pointer array itself. */
void freeKeys(uint8_t **keys, int numKeys) {
	for(int idx = 0; idx < numKeys; ++idx) {
		free(keys[idx]);
	}
	free(keys);
}
|
||||
|
||||
/* qsort comparator for ints: negative/zero/positive for a<b, a==b, a>b.
 * Uses a branch-free compare instead of (a - b), whose subtraction is
 * undefined behavior when the difference overflows int (e.g. INT_MIN vs 1). */
int cmpfunc(const void* a, const void* b) {
	int x = *(const int*)a;
	int y = *(const int*)b;
	return (x > y) - (x < y);
}
|
||||
|
||||
/* Returns the upper median of values (the element at index length/2 after
 * sorting).  Sorts the array in place as a side effect; length must be > 0. */
int median(int *values, int length) {
	qsort(values, length, sizeof(int), cmpfunc);
	return values[length/2];
}
|
||||
|
||||
/* Result of one benchmark operation: a numeric result plus the fdb error
 * (if any) that produced it. */
struct RunResult {
	int res;
	fdb_error_t e;
};
/* Compound-literal constructor for a RunResult. */
#define RES(x, y) (struct RunResult) { x, y }

/* One named KPI measurement; nodes form a singly linked list (newest first). */
struct Kpi {
	const char *name;   /* referenced, not owned */
	int value;
	const char *units;  /* referenced, not owned */

	struct Kpi *next;
};

/* One recorded error message; nodes form a singly linked list (newest first). */
struct Error {
	char *message;  /* heap-owned copy, freed by freeResultSet */

	struct Error *next;
};

/* Accumulates KPIs and errors for a run; serialized to JSON by writeResultSet. */
struct ResultSet {
	struct Kpi *kpis;
	struct Error *errors;
};
|
||||
|
||||
struct ResultSet* newResultSet() {
|
||||
struct ResultSet *rs = malloc(sizeof(struct ResultSet));
|
||||
|
||||
rs->kpis = NULL;
|
||||
rs->errors = NULL;
|
||||
|
||||
return rs;
|
||||
}
|
||||
|
||||
void addKpi(struct ResultSet *rs, const char *name, int value, const char *units) {
|
||||
struct Kpi *k = malloc(sizeof(struct Kpi));
|
||||
k->name = name;
|
||||
k->value = value;
|
||||
k->units = units;
|
||||
k->next = rs->kpis;
|
||||
rs->kpis = k;
|
||||
}
|
||||
|
||||
void addError(struct ResultSet *rs, const char *message) {
|
||||
struct Error *e = malloc(sizeof(struct Error));
|
||||
e->message = (char*)malloc(strlen(message)+1);
|
||||
strcpy(e->message, message);
|
||||
e->next = rs->errors;
|
||||
rs->errors = e;
|
||||
}
|
||||
|
||||
void writeResultSet(struct ResultSet *rs) {
|
||||
uint64_t id = ((uint64_t)rand() << 32) + rand();
|
||||
char name[100];
|
||||
sprintf(name, "fdb-c_result-%llu.json", id);
|
||||
FILE *fp = fopen(name, "w");
|
||||
if(!fp) {
|
||||
fprintf(stderr, "Could not open results file %s\n", name);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
fprintf(fp, "{\n");
|
||||
fprintf(fp, "\t\"kpis\": {\n");
|
||||
|
||||
struct Kpi *k = rs->kpis;
|
||||
while(k != NULL) {
|
||||
fprintf(fp, "\t\t\"%s\": { \"units\": \"%s\", \"value\": %d }", k->name, k->units, k->value);
|
||||
if(k->next != NULL) {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\n");
|
||||
k = k->next;
|
||||
}
|
||||
|
||||
fprintf(fp, "\t},\n");
|
||||
fprintf(fp, "\t\"errors\": [\n");
|
||||
|
||||
struct Error *e = rs->errors;
|
||||
while(e != NULL) {
|
||||
fprintf(fp, "\t\t\"%s\"", e->message);
|
||||
if(e->next != NULL) {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\n");
|
||||
e = e->next;
|
||||
}
|
||||
|
||||
fprintf(fp, "\t]\n");
|
||||
fprintf(fp, "}\n");
|
||||
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
void freeResultSet(struct ResultSet *rs) {
|
||||
struct Kpi *k = rs->kpis;
|
||||
while(k != NULL) {
|
||||
struct Kpi *next = k->next;
|
||||
free(k);
|
||||
k = next;
|
||||
}
|
||||
|
||||
struct Error *e = rs->errors;
|
||||
while(e != NULL) {
|
||||
struct Error *next = e->next;
|
||||
free(e->message);
|
||||
free(e);
|
||||
e = next;
|
||||
}
|
||||
|
||||
free(rs);
|
||||
}
|
||||
|
||||
/* If err is nonzero: prints "Error in <context>: <description>" to stderr
 * and records it in rs (when rs is non-NULL).  Always returns err so it can
 * wrap call sites inline. */
fdb_error_t getError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	if(err) {
		char msg[4096];
		/* snprintf bounds the write; the original sprintf into a
		 * strlen(context)+100 heap buffer could overflow if the fdb
		 * error description were long. */
		snprintf(msg, sizeof(msg), "Error in %s: %s", context, fdb_get_error(err));
		fprintf(stderr, "%s\n", msg);
		if(rs != NULL) {
			addError(rs, msg);
		}
	}

	return err;
}
|
||||
|
||||
/* Like getError, but treats any error as fatal: persists rs to disk,
 * frees it, and exits the process with status 1. */
void checkError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	if(getError(err, context, rs)) {
		if(rs != NULL) {
			writeResultSet(rs);
			freeResultSet(rs);
		}
		exit(1);
	}
}
|
||||
|
||||
/* Unconditionally prints "Error in <context>: <description>" to stderr and
 * records it in rs (when rs is non-NULL); returns err unchanged.  Unlike
 * getError, it logs even when err is zero. */
fdb_error_t logError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	char msg[4096];
	/* snprintf bounds the write; the original sprintf into a
	 * strlen(context)+100 heap buffer could overflow on long descriptions. */
	snprintf(msg, sizeof(msg), "Error in %s: %s", context, fdb_get_error(err));
	fprintf(stderr, "%s\n", msg);
	if(rs != NULL) {
		addError(rs, msg);
	}

	return err;
}
|
||||
|
||||
/* Logs err only when it is non-retryable; retryable errors pass through
 * silently so the caller's transaction-retry loop can handle them. */
fdb_error_t maybeLogError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	if(err && !fdb_error_predicate( FDB_ERROR_PREDICATE_RETRYABLE, err ) ) {
		return logError(err, context, rs);
	}
	return err;
}
|
||||
|
||||
/* pthread start routine that runs the FDB network loop until
 * fdb_stop_network() is called; any network error is fatal.
 * Takes (and ignores) a void* so the signature matches pthread_create's
 * required void *(*)(void *) — calling the old zero-argument version
 * through that pointer type was undefined behavior. */
void* runNetwork(void* arg) {
	(void)arg;
	checkError(fdb_run_network(), "run network", NULL);
	return NULL;
}
|
||||
|
||||
/* Starts the FDB network thread (handle returned via *netThread), connects
 * using the default cluster file (NULL), and opens the database named "DB".
 * All failures are fatal via checkError.  Caller owns the returned handle
 * and should destroy it with fdb_database_destroy. */
FDBDatabase* openDatabase(struct ResultSet *rs, pthread_t *netThread) {
	checkError(fdb_setup_network(), "setup network", rs);
	pthread_create(netThread, NULL, &runNetwork, NULL);

	FDBFuture *f = fdb_create_cluster(NULL);  /* NULL => default cluster file */
	checkError(fdb_future_block_until_ready(f), "block for cluster", rs);

	FDBCluster *cluster;
	checkError(fdb_future_get_cluster(f, &cluster), "get cluster", rs);

	fdb_future_destroy(f);

	f = fdb_cluster_create_database(cluster, (uint8_t*)"DB", 2);
	checkError(fdb_future_block_until_ready(f), "block for database", rs);

	FDBDatabase *db;
	checkError(fdb_future_get_database(f, &db), "get database", rs);

	fdb_future_destroy(f);
	fdb_cluster_destroy(cluster);

	return db;
}
|
|
@ -41,7 +41,7 @@ ACTOR Future<Void> _test() {
|
|||
// tr->setVersion(1);
|
||||
|
||||
Version ver = wait( tr->getReadVersion() );
|
||||
printf("%ld\n", ver);
|
||||
printf("%lld\n", ver);
|
||||
|
||||
state std::vector< Future<Version> > versions;
|
||||
|
||||
|
|
|
@ -22,5 +22,23 @@
|
|||
|
||||
fdb_flow_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
|
||||
fdb_flow_LDFLAGS := -Llib -lfdb_c $(fdbrpc_LDFLAGS)
|
||||
fdb_flow_LIBS := lib/libfdbrpc.a
|
||||
fdb_flow_LIBS :=
|
||||
|
||||
packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz: fdb_flow
|
||||
@echo "Packaging fdb_flow"
|
||||
@rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
|
||||
@mkdir -p packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb
|
||||
@cp lib/libfdb_flow.a packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib
|
||||
@find bindings/flow -name '*.h' -not -name 'bindings/flow/tester/*' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow \;
|
||||
@find bindings/c/foundationdb -name '*.h' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb \;
|
||||
@tar czf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz -C packages fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
|
||||
@rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
|
||||
|
||||
FDB_FLOW: packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz
|
||||
|
||||
FDB_FLOW_clean:
|
||||
@echo "Cleaning fdb_flow package"
|
||||
@rm -rf packages/fdb-flow-*.tar.gz
|
||||
|
||||
packages: FDB_FLOW
|
||||
packages_clean: FDB_FLOW_clean
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
fdb_flow_tester_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
|
||||
fdb_flow_tester_LDFLAGS := -Llib $(fdbrpc_LDFLAGS) -lfdb_c
|
||||
fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libfdbrpc.a lib/libflow.a lib/libfdb_c.$(DLEXT)
|
||||
fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libflow.a lib/libfdb_c.$(DLEXT)
|
||||
|
||||
fdb_flow_tester: lib/libfdb_c.$(DLEXT)
|
||||
@mkdir -p bindings/flow/bin
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "ab4fef131ee828e96ba67d31a7d690bd5f2f42040c6766b1b12fe856f87e0ff7"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
|
@ -0,0 +1,2 @@
|
|||
# The FoundationDB go bindings currently have no external golang dependencies outside of
|
||||
# the go standard library.
|
|
@ -10,10 +10,20 @@ This package requires:
|
|||
|
||||
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-500.
|
||||
|
||||
To install this package, in the top level of this repository run:
|
||||
To build this package, in the top level of this repository run:
|
||||
|
||||
make fdb_go
|
||||
|
||||
This will create binary packages for the appropriate platform within the "build" subdirectory of this folder.
|
||||
|
||||
To install this package, you can run the "fdb-go-install.sh" script:
|
||||
|
||||
./fdb-go-install.sh install
|
||||
|
||||
The "install" command of this script does not depend on the presence of the repo in general and will download the repository into
|
||||
your local go path. Running "localinstall" instead of "install" will use the local copy here (with a symlink) instead
|
||||
of downloading from the remote repository.
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
|
|
|
@ -0,0 +1,304 @@
|
|||
#!/bin/bash -eu
|
||||
#
|
||||
# fdb-go-install.sh
|
||||
#
|
||||
# Installs the FoundationDB Go bindings for a client. This will download
|
||||
# the repository from the remote repo either into the go directory
|
||||
# with the appropriate semantic version. It will then build a few
|
||||
# generated files that need to be present for the go build to work.
|
||||
# At the end, it has some advice for flags to modify within your
|
||||
# go environment so that other packages may successfully use this
|
||||
# library.
|
||||
#
|
||||
|
||||
DESTDIR="${DESTDIR:-}"
|
||||
FDBVER="${FDBVER:-5.0.1}"
|
||||
REMOTE="${REMOTE:-github.com}"
|
||||
FDBREPO="${FDBREPO:-apple/foundationdb}"
|
||||
|
||||
status=0
|
||||
|
||||
platform=$(uname)
|
||||
if [[ "${platform}" == "Darwin" ]] ; then
|
||||
FDBLIBDIR="${FDBLIBDIR:-/usr/local/lib}"
|
||||
libfdbc="libfdb_c.dylib"
|
||||
elif [[ "${platform}" == "Linux" ]] ; then
|
||||
FDBLIBDIR="${FDBLIBDIR:-/usr/lib}"
|
||||
libfdbc="libfdb_c.so"
|
||||
else
|
||||
echo "Unsupported platform ${platform}".
|
||||
echo "At the moment, only macOS and Linux are supported by this script."
|
||||
let status="${status} + 1"
|
||||
fi
|
||||
|
||||
filedir=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
|
||||
destdir=""
|
||||
|
||||
function printUsage() {
|
||||
echo "Usage: fdb-go-install.sh <cmd>"
|
||||
echo
|
||||
echo "cmd: One of the commands to run. The options are:"
|
||||
echo " install Download the FDB go bindings and install them"
|
||||
echo " localinstall Install a into the go path a local copy of the repo"
|
||||
echo " download Download but do not prepare the FoundationDB bindings"
|
||||
echo " help Print this help message and then quit"
|
||||
echo
|
||||
echo "Command Line Options:"
|
||||
echo " --fdbver <version> FoundationDB semantic version (default is ${FDBVER})"
|
||||
echo " -d/--dest-dir <dest> Local location for the repo (default is to place in go path)"
|
||||
echo
|
||||
echo "Environment Variable Options:"
|
||||
echo " REMOTE Remote repository to download from (currently ${REMOTE})"
|
||||
echo " FDBREPO Repository of FoundationDB library to download (currently ${FDBREPO})"
|
||||
echo " FDBLIBDIR Directory within which should be the FoundationDB c library (currently ${FDBLIBDIR})"
|
||||
}
|
||||
|
||||
function parseArgs() {
|
||||
local status=0
|
||||
|
||||
if [[ "${#}" -lt 0 ]] ; then
|
||||
printUsage
|
||||
let status="${status} + 1"
|
||||
else
|
||||
operation="${1}"
|
||||
shift
|
||||
if [[ "${operation}" != "install" ]] && [[ "${operation}" != "localinstall" ]] && [[ "${operation}" != "download" ]] && [[ "${operation}" != "help" ]] ; then
|
||||
echo "Unknown command: ${operation}"
|
||||
printUsage
|
||||
let status="${status} + 1"
|
||||
fi
|
||||
fi
|
||||
|
||||
while [[ "${#}" -gt 0 ]] && [[ "${status}" -eq 0 ]] ; do
|
||||
local key="${1}"
|
||||
case "${key}" in
|
||||
--fdbver)
|
||||
if [[ "${#}" -lt 2 ]] ; then
|
||||
echo "No version specified with --fdbver flag"
|
||||
printUsage
|
||||
let status="${status} + 1"
|
||||
else
|
||||
FDBVER="${2}"
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
|
||||
-d|--dest-dir)
|
||||
if [[ "${#}" -lt 2 ]] ; then
|
||||
echo "No destination specified with ${key} flag"
|
||||
printUsage
|
||||
let status="${status} + 1"
|
||||
else
|
||||
destdir="${2}"
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unrecognized argument ${key}"
|
||||
printUsage
|
||||
let status="${status} + 1"
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
return "${status}"
|
||||
}
|
||||
|
||||
# checkBin <binary> -- succeeds (status 0) iff <binary> is found on PATH
# via `which`; prints usage and fails when called without an argument.
function checkBin() {
	if [[ "${#}" -lt 1 ]] ; then
		echo "Usage: checkBin <binary>"
		return 1
	fi
	[[ -n $(which "${1}") ]]
}
|
||||
|
||||
if [[ "${status}" -gt 0 ]] ; then
|
||||
# We have already failed.
|
||||
:
|
||||
elif [[ "${#}" -lt 1 ]] ; then
|
||||
printUsage
|
||||
else
|
||||
required_bins=( 'go' 'git' 'make' 'mono' )
|
||||
|
||||
missing_bins=()
|
||||
for bin in "${required_bins[@]}" ; do
|
||||
if ! checkBin "${bin}" ; then
|
||||
missing_bins+=("${bin}")
|
||||
let status="${status} + 1"
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${status}" -gt 0 ]] ; then
|
||||
echo "Missing binaries: ${missing_bins[*]}"
|
||||
elif ! parseArgs ${@} ; then
|
||||
let status="${status} + 1"
|
||||
elif [[ "${operation}" == "help" ]] ; then
|
||||
printUsage
|
||||
else
|
||||
# Add go-specific environment variables.
|
||||
eval $(go env)
|
||||
|
||||
golibdir=$(dirname "${GOPATH}/src/${REMOTE}/${FDBREPO}")
|
||||
if [[ -z "${destdir}" ]] ; then
|
||||
if [[ "${operation}" == "localinstall" ]] ; then
|
||||
# Assume its the local directory.
|
||||
destdir=$(cd "${filedir}/../../.." && pwd)
|
||||
else
|
||||
destdir="${golibdir}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ ! -d "${destdir}" ]] ; then
|
||||
cmd=( 'mkdir' '-p' "${destdir}" )
|
||||
echo "${cmd[*]}"
|
||||
if ! "${cmd[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not create destination directory ${destdir}."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 1: Make sure repository is present.
|
||||
|
||||
if [[ "${status}" -eq 0 ]] ; then
|
||||
destdir=$( cd "${destdir}" && pwd ) # Get absolute path of destination dir.
|
||||
fdbdir="${destdir}/foundation"
|
||||
|
||||
if [[ ! -d "${destdir}" ]] ; then
|
||||
cmd=("mkdir" "-p" "${destdir}")
|
||||
echo "${cmd[*]}"
|
||||
if ! "${cmd[@]}" ; then
|
||||
echo "Could not create destination directory ${destdir}."
|
||||
let status="${status} + 1"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${operation}" == "localinstall" ]] ; then
|
||||
# No download occurs in this case.
|
||||
:
|
||||
else
|
||||
if [[ -d "${fdbdir}" ]] ; then
|
||||
echo "Directory ${fdbdir} already exists ; checking out appropriate tag"
|
||||
cmd1=( 'git' '-C' "${fdbdir}" 'fetch' 'origin' )
|
||||
cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "release-${FDBVER}" )
|
||||
|
||||
if ! echo "${cmd1[*]}" || ! "${cmd1[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not pull latest changes from origin"
|
||||
elif ! echo "${cmd2[*]}" || ! "${cmd2[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not checkout tag release-${FDBVER}."
|
||||
fi
|
||||
else
|
||||
echo "Downloading foundation repository into ${destdir}:"
|
||||
cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "release-${FDBVER}" "git@${REMOTE}:${FDBREPO}.git" )
|
||||
|
||||
echo "${cmd[*]}"
|
||||
if ! "${cmd[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not download repository."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 2: Build generated things.
|
||||
|
||||
if [[ "${operation}" == "download" ]] ; then
|
||||
# The generated files are not created under a strict download.
|
||||
:
|
||||
elif [[ "${status}" -eq 0 ]] ; then
|
||||
echo "Building generated files."
|
||||
cmd=( 'make' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' )
|
||||
|
||||
echo "${cmd[*]}"
|
||||
if ! "${cmd[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not generate required c header"
|
||||
else
|
||||
infile="${fdbdir}/fdbclient/vexillographer/fdb.options"
|
||||
outfile="${fdbdir}/bindings/go/src/fdb/generated.go"
|
||||
cmd=( 'go' 'run' "${fdbdir}/bindings/go/src/_util/translate_fdb_options.go" )
|
||||
echo "${cmd[*]} < ${infile} > ${outfile}"
|
||||
if ! "${cmd[@]}" < "${infile}" > "${outfile}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not generate generated go file."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 3: Add to go path.
|
||||
|
||||
if [[ "${operation}" == "download" ]] ; then
|
||||
# The files are not moved under a strict download.
|
||||
:
|
||||
elif [[ "${status}" -eq 0 ]] ; then
|
||||
linkpath="${GOPATH}/src/${REMOTE}/${FDBREPO}"
|
||||
if [[ "${linkpath}" == "${fdbdir}" ]] ; then
|
||||
# Downloaded directly into go path. Skip making the link.
|
||||
:
|
||||
elif [[ -e "${linkpath}" ]] ; then
|
||||
echo "Warning: link path (${linkpath}) already exists. Leaving in place."
|
||||
else
|
||||
dirpath=$(dirname "${linkpath}")
|
||||
if [[ ! -d "${dirpath}" ]] ; then
|
||||
cmd=( 'mkdir' '-p' "${dirpath}" )
|
||||
echo "${cmd[*]}"
|
||||
if ! "${cmd[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not create directory for link."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${status}" -eq 0 ]] ; then
|
||||
cmd=( 'ln' '-s' "${fdbdir}" "${linkpath}" )
|
||||
echo "${cmd[*]}"
|
||||
if ! "${cmd[@]}" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not create link within go path."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 4: Build the binaries.
|
||||
|
||||
if [[ "${operation}" == "download" ]] ; then
|
||||
# Do not install if only downloading
|
||||
:
|
||||
elif [[ "${status}" -eq 0 ]] ; then
|
||||
cgo_cflags="-g -O2 -I${linkpath}/bindings/c"
|
||||
cgo_ldflags="-g -O2 -L${FDBLIBDIR}"
|
||||
fdb_go_path="${REMOTE}/${FDBREPO}/bindings/go/src"
|
||||
|
||||
if [[ ! -e "${FDBLIBDIR}/${libfdbc}" ]] ; then
|
||||
# Just a warning. Don't fail script.
|
||||
echo
|
||||
echo "WARNING: The FoundationDB C library was not found within ${FDBLIBDIR}."
|
||||
echo "Your installation may be incomplete."
|
||||
echo
|
||||
elif ! CGO_CFLAGS="${cgo_cflags}" CGO_LDFLAGS="${cgo_ldflags}" go install "${fdb_go_path}/fdb" "${fdb_go_path}/fdb/tuple" "${fdb_go_path}/fdb/subspace" "${fdb_go_path}/fdb/directory" ; then
|
||||
let status="${status} + 1"
|
||||
echo "Could not build FoundationDB go libraries."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Step 5: Explain CGO flags.
|
||||
|
||||
if [[ "${status}" -eq 0 && ("${operation}" == "localinstall" || "${operation}" == "install" ) ]] ; then
|
||||
echo
|
||||
echo "The FoundationDB go bindings were successfully installed."
|
||||
echo "To build packages which use the go bindings, you will need to"
|
||||
echo "set the following environment variables:"
|
||||
echo " CGO_CFLAGS=\"${cgo_cflags}\""
|
||||
echo " CGO_LDFLAGS=\"${cgo_ldflags}\""
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
exit "${status}"
|
|
@ -21,7 +21,11 @@
|
|||
TARGETS += fdb_go fdb_go_tester
|
||||
CLEAN_TARGETS += fdb_go_clean fdb_go_tester_clean
|
||||
|
||||
GOPATH := $(CURDIR)/bindings/go
|
||||
GOPATH := $(CURDIR)/bindings/go/build
|
||||
GO_IMPORT_PATH := github.com/apple/foundationdb/bindings/go/src
|
||||
GO_DEST := $(GOPATH)/src/$(GO_IMPORT_PATH)
|
||||
|
||||
.PHONY: fdb_go fdb_go_path fdb_go_tester fdb_go_tester_clean godoc godoc_clean
|
||||
|
||||
# We only override if the environment didn't set it (this is used by
|
||||
# the fdbwebsite documentation build process)
|
||||
|
@ -38,18 +42,23 @@ else
|
|||
$(error Not prepared to compile on platform $(PLATFORM))
|
||||
endif
|
||||
|
||||
GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM)
|
||||
GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM)/$(GO_IMPORT_PATH)
|
||||
|
||||
GO_PACKAGES := fdb fdb/tuple fdb/subspace fdb/directory
|
||||
GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a))
|
||||
|
||||
GO_SRC := $(shell find $(GOPATH)/src -name '*.go')
|
||||
GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go')
|
||||
|
||||
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC)
|
||||
|
||||
fdb_go_path: $(GO_SRC)
|
||||
@echo "Creating fdb_go_path"
|
||||
@mkdir -p $(GO_DEST)
|
||||
@cp -r bindings/go/src/* $(GO_DEST)
|
||||
|
||||
fdb_go_clean:
|
||||
@echo "Cleaning fdb_go"
|
||||
@rm -rf $(GO_PACKAGE_OUTDIR)
|
||||
@rm -rf $(GOPATH)
|
||||
|
||||
fdb_go_tester: $(GOPATH)/bin/_stacktester
|
||||
|
||||
|
@ -57,40 +66,40 @@ fdb_go_tester_clean:
|
|||
@echo "Cleaning fdb_go_tester"
|
||||
@rm -rf $(GOPATH)/bin
|
||||
|
||||
$(GOPATH)/bin/_stacktester: $(GO_SRC) $(GO_PACKAGE_OBJECTS) bindings/go/src/fdb/generated.go
|
||||
$(GOPATH)/bin/_stacktester: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling $(basename $(notdir $@))"
|
||||
@go install _stacktester
|
||||
@go install $(GO_IMPORT_PATH)/_stacktester
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a bindings/go/src/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb/tuple"
|
||||
@go install fdb/tuple
|
||||
@go install $(GO_IMPORT_PATH)/fdb/tuple
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a bindings/go/src/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb/subspace"
|
||||
@go install fdb/subspace
|
||||
@go install $(GO_IMPORT_PATH)/fdb/subspace
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a bindings/go/src/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb/directory"
|
||||
@go install fdb/directory
|
||||
@go install $(GO_IMPORT_PATH)/fdb/directory
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_SRC) bindings/go/src/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb.a: fdb_go_path $(GO_SRC) $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb"
|
||||
@go install fdb
|
||||
@go install $(GO_IMPORT_PATH)/fdb
|
||||
|
||||
bindings/go/src/fdb/generated.go: lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
|
||||
$(GO_DEST)/fdb/generated.go: fdb_go_path lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
|
||||
@echo "Building $@"
|
||||
@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@
|
||||
|
||||
godoc: $(GO_SRC)
|
||||
godoc: fdb_go_path $(GO_SRC)
|
||||
@echo "Generating Go Documentation"
|
||||
@rm -rf $(GODOC_DIR)/godoc
|
||||
@mkdir -p $(GODOC_DIR)/godoc
|
||||
@mkdir -p $(GODOC_DIR)/godoc/lib/godoc
|
||||
@godoc -url "http://localhost:6060/pkg/fdb" > $(GODOC_DIR)/godoc/fdb.html
|
||||
@godoc -url "http://localhost:6060/pkg/fdb/tuple" > $(GODOC_DIR)/godoc/fdb.tuple.html
|
||||
@godoc -url "http://localhost:6060/pkg/fdb/subspace" > $(GODOC_DIR)/godoc/fdb.subspace.html
|
||||
@godoc -url "http://localhost:6060/pkg/fdb/directory" > $(GODOC_DIR)/godoc/fdb.directory.html
|
||||
@cp $(GOPATH)/godoc-resources/* $(GODOC_DIR)/godoc/lib/godoc
|
||||
@godoc -url "pkg/$(GO_IMPORT_PATH)/fdb" > $(GODOC_DIR)/godoc/fdb.html
|
||||
@godoc -url "pkg/$(GO_IMPORT_PATH)/fdb/tuple" > $(GODOC_DIR)/godoc/fdb.tuple.html
|
||||
@godoc -url "pkg/$(GO_IMPORT_PATH)/fdb/subspace" > $(GODOC_DIR)/godoc/fdb.subspace.html
|
||||
@godoc -url "pkg/$(GO_IMPORT_PATH)/fdb/directory" > $(GODOC_DIR)/godoc/fdb.directory.html
|
||||
@cp $(CURDIR)/bindings/go/godoc-resources/* $(GODOC_DIR)/godoc/lib/godoc
|
||||
@echo "Mangling paths in Go Documentation"
|
||||
@(find $(GODOC_DIR)/godoc/ -name *.html -exec sed -i '' -e 's_/lib_lib_' {} \;)
|
||||
@(sed -i -e 's_a href="tuple/"_a href="fdb.tuple.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
|
|
|
@ -21,10 +21,10 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/tuple"
|
||||
"fdb/subspace"
|
||||
"fdb/directory"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
|
||||
"strings"
|
||||
"bytes"
|
||||
)
|
||||
|
@ -94,14 +94,14 @@ func (sm *StackMachine) maybePath() []string {
|
|||
}
|
||||
|
||||
var createOps = map[string]bool {
|
||||
"CREATE_SUBSPACE": true,
|
||||
"CREATE_LAYER": true,
|
||||
"CREATE_OR_OPEN": true,
|
||||
"CREATE": true,
|
||||
"OPEN": true,
|
||||
"MOVE": true,
|
||||
"MOVE_TO": true,
|
||||
"OPEN_SUBSPACE": true,
|
||||
"CREATE_SUBSPACE": true,
|
||||
"CREATE_LAYER": true,
|
||||
"CREATE_OR_OPEN": true,
|
||||
"CREATE": true,
|
||||
"OPEN": true,
|
||||
"MOVE": true,
|
||||
"MOVE_TO": true,
|
||||
"OPEN_SUBSPACE": true,
|
||||
}
|
||||
|
||||
func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool, idx int, t fdb.Transactor, rt fdb.ReadTransactor) {
|
||||
|
|
|
@ -24,8 +24,8 @@ import (
|
|||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fdb"
|
||||
"fdb/tuple"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
|
||||
"log"
|
||||
"fmt"
|
||||
"os"
|
||||
|
|
|
@ -23,8 +23,8 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
"encoding/binary"
|
||||
"bytes"
|
||||
"math/rand"
|
||||
|
|
|
@ -40,8 +40,8 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
"errors"
|
||||
)
|
||||
|
||||
|
@ -140,15 +140,15 @@ type Directory interface {
|
|||
}
|
||||
|
||||
func stringsEqual(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, v := range a {
|
||||
if v != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, v := range a {
|
||||
if v != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func moveTo(t fdb.Transactor, dl directoryLayer, path, newAbsolutePath []string) (DirectorySubspace, error) {
|
||||
|
|
|
@ -23,9 +23,9 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"fdb/tuple"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
|
||||
"encoding/binary"
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
|
|
@ -23,9 +23,9 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"fdb/tuple"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
|
||||
)
|
||||
|
||||
type directoryPartition struct {
|
||||
|
|
|
@ -23,8 +23,8 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
)
|
||||
|
||||
// DirectorySubspace represents a Directory that may also be used as a Subspace
|
||||
|
|
|
@ -23,8 +23,8 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
|
||||
"bytes"
|
||||
)
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ A basic interaction with the FoundationDB API is demonstrated below:
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/apple/foundationdb/bindings/go/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"log"
|
||||
"fmt"
|
||||
)
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 200
|
||||
#define FDB_API_VERSION 500
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
import "C"
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
package fdb_test
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
|
|
@ -33,8 +33,8 @@
|
|||
package subspace
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/tuple"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
|
||||
"bytes"
|
||||
"errors"
|
||||
)
|
||||
|
|
|
@ -38,7 +38,7 @@ import (
|
|||
"fmt"
|
||||
"encoding/binary"
|
||||
"bytes"
|
||||
"fdb"
|
||||
"github.com/apple/foundationdb/bindings/go/src/fdb"
|
||||
)
|
||||
|
||||
// A TupleElement is one of the types that may be encoded in FoundationDB
|
||||
|
|
|
@ -144,6 +144,11 @@ define add_java_binding_targets
|
|||
@rm -r packages/jar$(1)_regular
|
||||
@cd bindings && jar uf $$(TOPDIR)/$$@ ../LICENSE
|
||||
|
||||
packages/fdb-java$(1)-$$(JARVER)-tests.jar: fdb_java$(1) versions.target
|
||||
@echo "Building $$@"
|
||||
@rm -f $$@
|
||||
@cp $$(TOPDIR)/bindings/java/foundationdb-tests$(1).jar packages/fdb-java$(1)-$$(JARVER)-tests.jar
|
||||
|
||||
packages/fdb-java$(1)-$$(JARVER)-sources.jar: $$(JAVA$(1)_GENERATED_SOURCES) versions.target
|
||||
@echo "Building $$@"
|
||||
@rm -f $$@
|
||||
|
@ -165,7 +170,7 @@ define add_java_binding_targets
|
|||
@cd packages/bundle$(1)_regular && jar cf $(TOPDIR)/$$@ *
|
||||
@rm -rf packages/bundle$(1)_regular
|
||||
|
||||
fdb_java$(1)_release: packages/fdb-java$(1)-$$(JARVER)-bundle.jar
|
||||
fdb_java$(1)_release: packages/fdb-java$(1)-$$(JARVER)-bundle.jar packages/fdb-java$(1)-$$(JARVER)-tests.jar
|
||||
|
||||
fdb_java$(1)_release_clean:
|
||||
@echo "Cleaning Java release"
|
||||
|
|
|
@ -44,8 +44,8 @@ public interface Database extends Disposable, TransactionContext {
|
|||
* Creates a {@link Transaction} that operates on this {@code Database}.<br>
|
||||
* <br>
|
||||
* Note: Java transactions automatically set the {@link TransactionOptions#setUsedDuringCommitProtectionDisable}
|
||||
* option. This is because the Java bindings disallow use of {@code Transaction} objects after either
|
||||
* {@link Transaction#reset} or {@link Transaction#onError} is called.
|
||||
* option. This is because the Java bindings disallow use of {@code Transaction} objects after
|
||||
* {@link Transaction#onError} is called.
|
||||
*
|
||||
* @return a newly created {@code Transaction} that reads from and writes to this {@code Database}.
|
||||
*/
|
||||
|
|
|
@ -61,7 +61,7 @@ public interface AsyncIterator<T> extends Iterator<T>, Disposable {
|
|||
/**
|
||||
* Returns the next element in the sequence. This will not block if, since the
|
||||
* last call to {@code next()}, {@link #onHasNext()} was called and the resulting
|
||||
* <h1>FIXME!!!!</h1> has completed or the blocking call {@link #hasNext()} was called
|
||||
* {@link CompletableFuture} has completed or the blocking call {@link #hasNext()} was called
|
||||
* and has returned. It is legal, therefore, to make a call to {@code next()} without a
|
||||
* preceding call to
|
||||
* {@link #hasNext()} or {@link #onHasNext()}, but that invocation of {@code next()}
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
*/
|
||||
|
||||
/**
|
||||
* Provides additional constructs for asynchronous programming against Java's CompletableFutures.
|
||||
* Provides additional constructs for asynchronous programming against Java's {@link java.util.concurrent.CompletableFuture CompletableFuture}s.
|
||||
*
|
||||
*/
|
||||
package com.apple.cie.foundationdb.async;
|
||||
|
|
|
@ -24,10 +24,11 @@ and add it to your classpath.<br>
|
|||
<br>
|
||||
<h3>Getting started</h3>
|
||||
To start using FoundationDB from Java, create an instance of the
|
||||
{@link FDB FoundationDB API interface} with the version of the
|
||||
{@link com.apple.cie.foundationdb.FDB FoundationDB API interface} with the version of the
|
||||
API that you want to use (this release of the FoundationDB Java API supports only version {@code 500}).
|
||||
With this API object you can then open {@link Cluster}s and
|
||||
{@link Database}s and start using {@link Transaction}s.
|
||||
With this API object you can then open {@link com.apple.cie.foundationdb.Cluster Cluster}s and
|
||||
{@link com.apple.cie.foundationdb.Database Database}s and start using
|
||||
{@link com.apple.cie.foundationdb.Transaction Transactions}s.
|
||||
Here we give an example. The example relies on a cluster file at the
|
||||
<a href="/documentation/api-general.html#default-cluster-file">default location</a>
|
||||
for your platform and a running server.<br>
|
||||
|
@ -77,7 +78,7 @@ for information about how Tuples sort and can be used to efficiently model data.
|
|||
The {@link com.apple.cie.foundationdb.directory Directory API} is provided with the core
|
||||
Java API for FoundationDB. This layer is provided in some form in all official
|
||||
language bindings. The FoundationDB API provides directories as a tool for
|
||||
managing related {@link Subspace}s. Directories are a
|
||||
managing related {@link com.apple.cie.foundationdb.subspace.Subspace Subspace}s. Directories are a
|
||||
recommended approach for administering applications. Each application should
|
||||
create or open at least one directory to manage its subspaces. Directories are
|
||||
identified by hierarchical paths analogous to the paths in a Unix-like file system.
|
||||
|
|
|
@ -98,7 +98,6 @@ public abstract class AbstractTester {
|
|||
}
|
||||
if (args.useExternalClient()) {
|
||||
fdb.options().setDisableLocalClient();
|
||||
// TODO: set external client directory
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -115,4 +114,4 @@ public abstract class AbstractTester {
|
|||
t.printStackTrace();
|
||||
return new RuntimeException(errorMessage, t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,88 +0,0 @@
|
|||
/*
|
||||
* AsListTest.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.cie.foundationdb.test;
|
||||
|
||||
import com.apple.cie.foundationdb.Database;
|
||||
import com.apple.cie.foundationdb.FDB;
|
||||
import com.apple.cie.foundationdb.LocalityUtil;
|
||||
import com.apple.cie.foundationdb.Transaction;
|
||||
import com.apple.cie.foundationdb.async.AsyncUtil;
|
||||
|
||||
import java.util.function.Function;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
|
||||
public class AsListTest {
|
||||
/**
|
||||
* When the database contains keys a, b, c, d, e -- this should return 5 items,
|
||||
* a bug made the addition of the clear into the result returning 0 items.
|
||||
*/
|
||||
public static void main(String[] args) {
|
||||
FDB fdb = FDB.selectAPIVersion(500);
|
||||
Database database = fdb.open("T:\\circus\\tags\\RebarCluster-bbc\\cluster_id.txt");
|
||||
database.options().setLocationCacheSize(42);
|
||||
Transaction tr = database.createTransaction();
|
||||
//tr.clear("g".getBytes());
|
||||
/*tr.clear("bbb".getBytes());
|
||||
AsyncIterable<KeyValue> query = tr.getRange(
|
||||
KeySelector.firstGreaterOrEqual("a".getBytes()),
|
||||
KeySelector.firstGreaterOrEqual("e".getBytes()),
|
||||
Integer.MAX_VALUE);
|
||||
//List<KeyValue> list = query.asList().get();
|
||||
//System.out.println("List size: " + list.size());
|
||||
*/
|
||||
String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();
|
||||
for(String s : keyAddresses) {
|
||||
System.out.println(" @ " + s);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
CompletableFuture<Integer> i = AsyncUtil.applySafely(new Function<Exception, CompletableFuture<Integer>>() {
|
||||
@Override
|
||||
public CompletableFuture<Integer> apply(Exception o) {
|
||||
return CompletableFuture.completedFuture(3);
|
||||
}
|
||||
}, new RuntimeException());
|
||||
|
||||
CompletableFuture<Integer> f = null;
|
||||
|
||||
@SuppressWarnings({ "unused", "null" })
|
||||
CompletableFuture<String> g = f.thenComposeAsync(new Function<Integer, CompletableFuture<String>>() {
|
||||
@Override
|
||||
public CompletableFuture<String> apply(Integer o) {
|
||||
return CompletableFuture.completedFuture(o.toString());
|
||||
}
|
||||
});
|
||||
|
||||
@SuppressWarnings({ "unused", "null" })
|
||||
CompletableFuture<String> g2 = f.thenComposeAsync(new Function<Integer, CompletableFuture<String>>() {
|
||||
@Override
|
||||
public CompletableFuture<String> apply(Integer o) {
|
||||
return CompletableFuture.completedFuture(o.toString());
|
||||
}
|
||||
}).exceptionally(new Function<Throwable, String>() {
|
||||
@Override
|
||||
public String apply(Throwable o) {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -25,7 +25,6 @@ import com.apple.cie.foundationdb.KeySelector;
|
|||
import com.apple.cie.foundationdb.Transaction;
|
||||
import com.apple.cie.foundationdb.TransactionContext;
|
||||
import com.apple.cie.foundationdb.async.AsyncUtil;
|
||||
import com.apple.cie.foundationdb.subspace.Subspace;
|
||||
import com.apple.cie.foundationdb.tuple.ByteArrayUtil;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -62,6 +61,7 @@ public class PerformanceTester extends AbstractTester {
|
|||
SERIAL_GET("Java Completable API serial get throughput"),
|
||||
GET_RANGE("Java Completable API get_range throughput"),
|
||||
GET_KEY("Java Completable API get_key throughput"),
|
||||
GET_SINGLE_KEY_RANGE("Java Completable API get_single_key_range throughput"),
|
||||
ALTERNATING_GET_SET("Java Completable API alternating get and set throughput"),
|
||||
WRITE_TRANSACTION("Java Completable API single-key transaction throughput");
|
||||
|
||||
|
@ -109,6 +109,7 @@ public class PerformanceTester extends AbstractTester {
|
|||
Tests.SERIAL_GET.setFunction(db -> serialGet(db, 2_000));
|
||||
Tests.GET_RANGE.setFunction(db -> getRange(db, 1_000));
|
||||
Tests.GET_KEY.setFunction(db -> getKey(db, 2_000));
|
||||
Tests.GET_SINGLE_KEY_RANGE.setFunction(db -> getSingleKeyRange(db, 2_000));
|
||||
Tests.ALTERNATING_GET_SET.setFunction(db -> alternatingGetSet(db, 2_000));
|
||||
Tests.WRITE_TRANSACTION.setFunction(db -> writeTransaction(db, 1_000));
|
||||
}
|
||||
|
@ -345,6 +346,20 @@ public class PerformanceTester extends AbstractTester {
|
|||
});
|
||||
}
|
||||
|
||||
public Double getSingleKeyRange(TransactionContext tcx, int count) {
|
||||
return tcx.run(tr -> {
|
||||
tr.options().setRetryLimit(5);
|
||||
long start = System.nanoTime();
|
||||
for (int i = 0; i < count; i++) {
|
||||
int keyIndex = randomKeyIndex();
|
||||
tr.getRange(key(keyIndex), key(keyIndex + 1), 2).asList().join();
|
||||
}
|
||||
long end = System.nanoTime();
|
||||
|
||||
return count*1_000_000_000.0/(end - start);
|
||||
});
|
||||
}
|
||||
|
||||
public Double writeTransaction(TransactionContext tcx, int count) {
|
||||
long start = System.nanoTime();
|
||||
for (int i = 0; i < count; i++) {
|
||||
|
@ -381,6 +396,7 @@ public class PerformanceTester extends AbstractTester {
|
|||
new PerformanceTester().run(args);
|
||||
} catch (IllegalArgumentException e) {
|
||||
System.out.println("Could not run test due to malformed arguments.");
|
||||
System.out.println(e.getMessage());
|
||||
System.exit(1);
|
||||
} catch (Exception e) {
|
||||
System.out.println("Fatal error encountered during run: " + e);
|
||||
|
|
|
@ -1,91 +0,0 @@
|
|||
/*
|
||||
* TestApp.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.cie.foundationdb.test;
|
||||
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
|
||||
import com.apple.cie.foundationdb.Cluster;
|
||||
import com.apple.cie.foundationdb.Database;
|
||||
import com.apple.cie.foundationdb.FDB;
|
||||
import com.apple.cie.foundationdb.Transaction;
|
||||
|
||||
public class TestApp {
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
try {
|
||||
Cluster cluster = FDB.selectAPIVersion(500).createCluster();
|
||||
System.out.println("I now have the cluster");
|
||||
Database db = cluster.openDatabase();
|
||||
|
||||
Transaction tr = db.createTransaction();
|
||||
System.out.println("TR: " + tr);
|
||||
|
||||
byte[] appleValue = tr.get("apple".getBytes()).get();
|
||||
System.out.println("Apple: " + (appleValue == null ? null : new String(appleValue)));
|
||||
|
||||
tr.set("apple".getBytes(), "crunchy".getBytes());
|
||||
System.out.println("Attempting to commit apple/crunchy...");
|
||||
tr.commit().get(); // FIXME: this is not an ok use of the API
|
||||
tr = db.createTransaction();
|
||||
|
||||
long topTime = 0, getTime = 0, bottomTime = 0;
|
||||
|
||||
for(int i = 0; i < 1000; i++) {
|
||||
long a = System.currentTimeMillis();
|
||||
|
||||
final byte[] key = ("apple" + i).getBytes();
|
||||
tr = db.createTransaction();
|
||||
CompletableFuture<byte[]> future = tr.get(key);
|
||||
|
||||
long b = System.currentTimeMillis();
|
||||
|
||||
future.get();
|
||||
|
||||
long c = System.currentTimeMillis();
|
||||
|
||||
tr.set(key, ("Apple" + i).getBytes());
|
||||
final CompletableFuture<Void> commit = tr.commit();
|
||||
|
||||
long d = System.currentTimeMillis();
|
||||
|
||||
commit.whenCompleteAsync((v, error) -> {
|
||||
if(error != null) {
|
||||
error.printStackTrace();
|
||||
}
|
||||
});
|
||||
|
||||
topTime += b - a;
|
||||
getTime += c - b;
|
||||
bottomTime += d - c;
|
||||
}
|
||||
|
||||
System.out.println(" Top: " + topTime);
|
||||
System.out.println(" Get: " + getTime);
|
||||
System.out.println(" Bottom: " + bottomTime);
|
||||
|
||||
tr.dispose();
|
||||
db.dispose();
|
||||
cluster.dispose();
|
||||
} catch(Throwable t) {
|
||||
t.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -99,7 +99,7 @@ public class TesterArgs {
|
|||
} else if (arg.equals("--enable-callbacks-on-external-threads")) {
|
||||
callbacksOnExternalThread = true;
|
||||
} else if (arg.equals("--use-external-client")) {
|
||||
externalClient = false;
|
||||
externalClient = true;
|
||||
} else if (arg.equals("--tests-to-run")) {
|
||||
if (i + 1 < args.length && args[i + 1].charAt(0) != '-') {
|
||||
int j;
|
||||
|
|
|
@ -46,8 +46,8 @@ public interface Database extends Disposable, TransactionContext {
|
|||
* Creates a {@link Transaction} that operates on this {@code Database}.<br>
|
||||
* <br>
|
||||
* Note: Java transactions automatically set the {@link TransactionOptions#setUsedDuringCommitProtectionDisable}
|
||||
* option. This is because the Java bindings disallow use of {@code Transaction} objects after either
|
||||
* {@link Transaction#reset} or {@link Transaction#onError} is called.
|
||||
* option. This is because the Java bindings disallow use of {@code Transaction} objects after
|
||||
* {@link Transaction#onError} is called.
|
||||
*
|
||||
* @return a newly created {@code Transaction} that reads from and writes to this {@code Database}.
|
||||
*/
|
||||
|
|
|
@ -24,11 +24,12 @@ and add it to your classpath.<br>
|
|||
<br>
|
||||
<h3>Getting started</h3>
|
||||
To start using FoundationDB from Java, create an instance of the
|
||||
{@link FDB FoundationDB API interface} with the version of the
|
||||
{@link com.apple.cie.foundationdb.FDB FoundationDB API interface} with the version of the
|
||||
API that you want to use (this release of the FoundationDB Java API supports only version {@code 500}).
|
||||
With this API object you can then open {@link Cluster}s and
|
||||
{@link Database}s and start using {@link Transaction}s.
|
||||
Here we give an example. The example relies on a cluster file at the
|
||||
With this API object you can then open {@link com.apple.cie.foundationdb.Cluster}s and
|
||||
{@link com.apple.cie.foundationdb.Database}s and start using
|
||||
{@link com.apple.cie.foundationdb.Transaction}s. Here we give an example. The example relies on a
|
||||
cluster file at the
|
||||
<a href="/documentation/api-general.html#default-cluster-file">default location</a>
|
||||
for your platform and a running server.<br>
|
||||
<br>
|
||||
|
@ -77,7 +78,7 @@ for information about how Tuples sort and can be used to efficiently model data.
|
|||
The {@link com.apple.cie.foundationdb.directory Directory API} is provided with the core
|
||||
Java API for FoundationDB. This layer is provided in some form in all official
|
||||
language bindings. The FoundationDB API provides directories as a tool for
|
||||
managing related {@link Subspace}s. Directories are a
|
||||
managing related {@link com.apple.cie.foundationdb.subspace.Subspace Subspace}s. Directories are a
|
||||
recommended approach for administering applications. Each application should
|
||||
create or open at least one directory to manage its subspaces. Directories are
|
||||
identified by hierarchical paths analogous to the paths in a Unix-like file system.
|
||||
|
@ -87,12 +88,12 @@ for the corresponding subspace. In effect, directories provide a level of indire
|
|||
for access to subspaces.
|
||||
<br>
|
||||
<h3>{@link com.apple.cie.foundationdb.async.Future Future}s and asynchronous operation</h3>
|
||||
Asynchronous FoundationDB operations return {@link Future}s.
|
||||
A {@link Future} can be used in a blocking way using the
|
||||
{@link Future#get() get()} method or in a
|
||||
Asynchronous FoundationDB operations return {@link com.apple.cie.foundationdb.async.Future Future}s.
|
||||
A {@link com.apple.cie.foundationdb.async.Future Future} can be used in a blocking way using the
|
||||
{@link com.apple.cie.foundationdb.async.Future#get() get()} method or in a
|
||||
fully-asynchronous way using the
|
||||
{@link Future#map(Function) map()} and
|
||||
{@link Future#flatMap(Function) flatMap()}
|
||||
{@link com.apple.cie.foundationdb.async.Future#map(Function) map()} and
|
||||
{@link com.apple.cie.foundationdb.async.Future#flatMap(Function) flatMap()}
|
||||
methods. Generally, the blocking style is more straightforward and the asynchronous style
|
||||
is more efficient. Mixing the two styles correctly can be tricky, so consider choosing
|
||||
one or the other. See the {@linkplain com.apple.cie.foundationdb.async async Package documentation}
|
||||
|
|
|
@ -46,7 +46,7 @@ bindings/nodejs/fdb_node.stamp: bindings/nodejs/src/FdbOptions.g.cpp bindings/no
|
|||
for ver in $(NODE_VERSIONS); do \
|
||||
MMVER=`echo $$ver | sed -e 's,\., ,g' | awk '{print $$1 "." $$2}'` && \
|
||||
mkdir modules/$$MMVER && \
|
||||
node-gyp configure --target=$$ver && \
|
||||
node-gyp configure --dist-url=https://nodejs.org/dist --target=$$ver && \
|
||||
node-gyp -v build && \
|
||||
cp build/Release/fdblib.node modules/$${MMVER} ; \
|
||||
done
|
||||
|
@ -67,6 +67,7 @@ bindings/nodejs/package.json: bindings/nodejs/package.json.in $(ALL_MAKEFILES) v
|
|||
@m4 -DVERSION=$(NPMVER) $< > $@
|
||||
@echo "Updating Node dependencies"
|
||||
@cd bindings/nodejs && \
|
||||
npm config set registry "https://registry.npmjs.org/" && \
|
||||
npm update
|
||||
|
||||
fdb_node_npm: fdb_node versions.target bindings/nodejs/README.md bindings/nodejs/lib/*.js bindings/nodejs/src/* bindings/nodejs/binding.gyp LICENSE
|
||||
|
|
|
@ -0,0 +1,196 @@
|
|||
# FDB Tuple layer typecodes
|
||||
|
||||
This document is intended to be the system of record for the allocation of typecodes in the Tuple layer. The source code isn’t good enough because a typecode might be added to one language (or by a customer) before another.
|
||||
|
||||
Status: Standard means that all of our language bindings implement this typecode
|
||||
Status: Reserved means that this typecode is not yet used in our standard language bindings, but may be in use by third party bindings or specific applications
|
||||
Status: Deprecated means that a previous layer used this type, but issues with that type code have led us to mark this type code as not to be used.
|
||||
|
||||
|
||||
### **Null Value**
|
||||
|
||||
Typecode: 0x00
|
||||
Length: 0 bytes
|
||||
Status: Standard
|
||||
|
||||
### **Byte String**
|
||||
|
||||
Typecode: 0x01
|
||||
Length: Variable (terminated by` [\x00]![\xff]`)
|
||||
Encoding: `b'\x01' + value.replace(b'\x00', b'\x00\xFF') + b'\x00'`
|
||||
Test case: `pack(“foo\x00bar”) == b'\x01foo\x00\xffbar\x00'`
|
||||
Status: Standard
|
||||
|
||||
In other words, byte strings are null terminated with null values occurring in the string escaped in an order-preserving way.
|
||||
|
||||
### **Unicode String**
|
||||
|
||||
Typecode: 0x02
|
||||
Length: Variable (terminated by [\x00]![\xff])
|
||||
Encoding: `b'\x02' + value.encode('utf-8').replace(b'\x00', b'\x00\xFF') + b'\x00'`
|
||||
Test case: `pack( u"F\u00d4O\u0000bar" ) == b'\x02F\xc3\x94O\x00\xffbar\x00'`
|
||||
Status: Standard
|
||||
|
||||
This is the same way that byte strings are encoded, but first, the unicode string is encoded in UTF-8.
|
||||
|
||||
### **(DEBRECATED) Nested Tuple**
|
||||
|
||||
Typecodes: 0x03-0x04
|
||||
Length: Variable (terminated by 0x04 type code)
|
||||
Status: Deprecated
|
||||
|
||||
This encoding was used by a few layers. However, it had ordering problems when one tuple was a prefix of another and the type of the first element in the longer tuple was either null or a byte string. For an example, consider the empty tuple and the tuple containing only null. In the old scheme, the empty tuple would be encoded as `\x03\x04` while the tuple containing only null would be encoded as `\x03\x00\x04`, so the second tuple would sort first based on their bytes, which is incorrect semantically.
|
||||
|
||||
### **Nested Tuple**
|
||||
|
||||
Typecodes: 0x05
|
||||
Length: Variable (terminated by `[\x00]![\xff]` at beginning of nested element)
|
||||
Encoding: `b'\x05' + ''.join(map(lambda x: b'\x00\xff' if x is None else pack(x), value)) + b'\x00'`
|
||||
Test case: `pack( (“foo\x00bar”, None, ()) ) == b'\x05\x01foo\x00\xffbar\x00\x00\xff\x05\x00\x00'`
|
||||
Status: Standard
|
||||
|
||||
The list is ended with a 0x00 byte. Nulls within the tuple are encoded as `\x00\xff`. There is no other null escaping. In particular, 0x00 bytes that are within the nested types can be left as-is as they are passed over when decoding the interior types. To show how this fixes the bug in the previous version of nested tuples, the empty tuple is now encoded as `\x05\x00` while the tuple containing only null is encoded as `\x05\x00\xff\x00`, so the first tuple will sort first.
|
||||
|
||||
### **Negative arbitrary-precision Integer**
|
||||
|
||||
Typecodes: 0x0a, 0x0b
|
||||
Encoding: Not defined yet
|
||||
Status: Reserved; 0x0b used in Python and Java
|
||||
|
||||
These typecodes are reserved for encoding integers larger than 8 bytes. Presumably the type code would be followed by some encoding of the length, followed by the big endian one’s complement number. Reserving two typecodes for each of positive and negative numbers is probably overkill, but until there’s a design in place we might as well not use them. In the Python and Java implementations, 0x0b stores negative numbers which are expressed with between 9 and 255 bytes. The first byte following the type code (0x0b) is a single byte expressing the number of bytes in the integer (with its bits flipped to preserve order), followed by that number of bytes representing the number in big endian order in one's complement.
|
||||
|
||||
### **Integer**
|
||||
|
||||
Typecodes: 0x0c - 0x1c
|
||||
0x0c is an 8 byte negative number
|
||||
0x13 is a 1 byte negative number
|
||||
0x14 is a zero
|
||||
0x15 is a 1 byte positive number
|
||||
0x1c is an 8 byte positive number
|
||||
Length: Depends on typecode (0-8 bytes)
|
||||
Encoding: positive numbers are big endian
|
||||
negative numbers are big endian one’s complement (so -1 is 0x13 0xfe)
|
||||
Test case: `pack( -5551212 ) == b'\x11\xabK\x93'`
|
||||
Status: Standard
|
||||
|
||||
There is some variation in the ability of language bindings to encode and decode values at the outside of the possible range, because of different native representations of integers.
|
||||
|
||||
### **Positive arbitrary-precision Integer**
|
||||
|
||||
Typecodes: 0x1d, 0x1e
|
||||
Encoding: Not defined yet
|
||||
Status: Reserved; 0x1d used in Python and Java
|
||||
|
||||
These typecodes are reserved for encoding integers larger than 8 bytes. Presumably the type code would be followed by some encoding of the length, followed by the big endian one’s complement number. Reserving two typecodes for each of positive and negative numbers is probably overkill, but until there’s a design in place we might as well not use them. In the Python and Java implementations, 0x1d stores positive numbers which are expressed with between 9 and 255 bytes. The first byte following the type code (0x1d) is a single byte expressing the number of bytes in the integer, followed by that number of bytes representing the number in big endian order.
|
||||
|
||||
### **IEEE Binary Floating Point**
|
||||
|
||||
Typecodes:
|
||||
0x20 - float (32 bits)
|
||||
0x21 - double (64 bits)
|
||||
0x22 - long double (80 bits)
|
||||
Length: 4 - 10 bytes
|
||||
Test case: `pack( -42f ) == b'=\xd7\xff\xff'`
|
||||
Encoding: Big-endian IEEE binary representation, followed by the following transformation:
|
||||
```python
|
||||
if ord(rep[0])&0x80: # Check sign bit
|
||||
# Flip all bits, this is easier in most other languages!
|
||||
return "".join( chr(0xff^ord(r)) for r in rep )
|
||||
else:
|
||||
# Flip just the sign bit
|
||||
return chr(0x80^ord(rep[0])) + rep[1:]
|
||||
```
|
||||
Status: Standard (float and double) ; Reserved (long double)
|
||||
|
||||
The binary representation should not be assumed to be canonicalized (as to multiple representations of NaN, for example) by a reader. This order sorts all numbers in the following way:
|
||||
|
||||
* All negative NaN values with order determined by mantissa bits (which are semantically meaningless)
|
||||
* Negative inifinity
|
||||
* All real numbers in the standard order (except that -0.0 < 0.0)
|
||||
* Positive infinity
|
||||
* All positive NaN values with order determined by mantissa bits
|
||||
|
||||
This should be equivalent to the standard IEEE total ordering.
|
||||
|
||||
### **Arbitrary-precision Decimal**
|
||||
|
||||
Typecodes: 0x23, 0x24
|
||||
Length: Arbitrary
|
||||
Encoding: Scale followed by arbitrary precision integer
|
||||
Status: Reserved
|
||||
|
||||
This encoding format has been used by layers. Note that this encoding makes almost no guarantees about ordering properties of tuple-encoded values and should thus generally be avoided.
|
||||
|
||||
### **(DEPRECATED) True Value**
|
||||
|
||||
Typecode: 0x25
|
||||
Length: 0 bytes
|
||||
Status: Deprecated
|
||||
|
||||
### **False Value**
|
||||
|
||||
Typecode: 0x26
|
||||
Length: 0 bytes
|
||||
Status: Standard
|
||||
|
||||
### **True Value**
|
||||
|
||||
Typecode: 0x27
|
||||
Length: 0 bytes
|
||||
Status: Standard
|
||||
|
||||
Note that false will sort before true with the given encoding.
|
||||
|
||||
### **RFC 4122 UUID**
|
||||
|
||||
Typecode: 0x30
|
||||
Length: 16 bytes
|
||||
Encoding: Network byte order as defined in the rfc: [_http://www.ietf.org/rfc/rfc4122.txt_](http://www.ietf.org/rfc/rfc4122.txt)
|
||||
Status: Standard
|
||||
|
||||
This is equivalent to the unsigned byte ordering of the UUID bytes in big-endian order.
|
||||
|
||||
### **64 bit identifier**
|
||||
|
||||
Typecode: 0x31
|
||||
Length: 8 bytes
|
||||
Encoding: Big endian unsigned 8-byte integer (typically random or perhaps semi-sequential)
|
||||
Status: Reserved
|
||||
|
||||
There’s definitely some question of whether this deserves to be separated from a plain old 64 bit integer, but a separate type was desired in one of the third-party bindings. This type has not been ported over to the first-party bindings.
|
||||
|
||||
### **80 Bit versionstamp**
|
||||
|
||||
Typecode: 0x32
|
||||
Length: 10 bytes
|
||||
Encoding: Big endian 10-byte integer. First/high 8 bytes are a database version, next two are batch version.
|
||||
Status: Reserved
|
||||
|
||||
### **96 Bit Versionstamp**
|
||||
|
||||
Typecode: 0x33
|
||||
Length: 12 bytes
|
||||
Encoding: Big endian 12-byte integer. First/high 8 bytes are a database version, next two are batch version, next two are ordering within transaction.
|
||||
Status: Reserved
|
||||
|
||||
The two versionstamp typecodes are reserved for future work adding compatibility between the tuple layer and versionstamp operations. Note that the first 80 bits of the 96 bit versionstamp are the same as the contents of the 80 bit versionstamp, and they correspond to what the `SET_VERSIONSTAMP_KEY` mutation will write into a database key , i.e., the first 8 bytes are a big-endian, unsigned version corresponding to the commit version of a transaction, and the next to bytes are a big-endian, unsigned batch number ordering transactions are committed at the same version. The final two bytes of the 96 bit versionstamp are written by the client and should order writes within a single transaction, thereby providing a global order for all versions.
|
||||
|
||||
### **User type codes**
|
||||
|
||||
Typecode: 0x40 - 0x4f
|
||||
Length: Variable (user defined)
|
||||
Encoding: User defined
|
||||
Status: Reserved
|
||||
|
||||
These type codes may be used by third party extenders without coordinating with us. If used in shipping software, the software should use the directory layer and specify a specific layer name when opening its directories to eliminate the possibility of conflicts.
|
||||
|
||||
The only way in which future official, otherwise backward-compatible versions of the tuple layer would be expected to use these type codes is to implement some kind of actual extensibility point for this purpose - they will not be used for standard types.
|
||||
|
||||
### **Escape Character**
|
||||
|
||||
Typecode: 0xff
|
||||
Length: N/A
|
||||
Encoding: N/A
|
||||
Status: Reserved
|
||||
|
||||
This type code is not used for anything. However, several of the other tuple types depend on this type code not being used as a type code for other types in order to correctly escape bytes in an order-preserving way. Therefore, it would be a Very Bad Idea™ for future development to start using this code for anything else.
|
|
@ -23,6 +23,7 @@
|
|||
#include "flow/serialize.h"
|
||||
#include "flow/IRandom.h"
|
||||
#include "flow/genericactors.actor.h"
|
||||
#include "flow/SignalSafeUnwind.h"
|
||||
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/BackupAgent.h"
|
||||
|
@ -1820,6 +1821,7 @@ extern uint8_t *g_extra_memory;
|
|||
|
||||
int main(int argc, char* argv[]) {
|
||||
platformInit();
|
||||
initSignalSafeUnwind();
|
||||
|
||||
int status = FDB_EXIT_SUCCESS;
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include "fdbclient/FDBOptions.g.h"
|
||||
|
||||
#include "flow/DeterministicRandom.h"
|
||||
#include "flow/SignalSafeUnwind.h"
|
||||
#include "fdbrpc/TLSConnection.h"
|
||||
#include "fdbrpc/Platform.h"
|
||||
|
||||
|
@ -436,9 +437,9 @@ void initHelp() {
|
|||
"clear a range of keys from the database",
|
||||
"All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK);
|
||||
helpMap["configure"] = CommandHelp(
|
||||
"configure [new] <single|double|triple|three_data_hall|three_datacenter|ssd|memory|proxies=<PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
|
||||
"configure [new] <single|double|triple|three_data_hall|three_datacenter|multi_dc|ssd|memory|proxies=<PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
|
||||
"change database configuration",
|
||||
"The `new' option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When used, both a redundancy mode and a storage engine must be specified.\n\nRedundancy mode:\n single - one copy of the data. Not fault tolerant.\n double - two copies of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - See the Admin Guide.\n three_datacenter - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. Must be at least 1, or set to -1 which restores the number of proxies to the default value.\n\nlogs=<LOGS>: Sets the desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information.");
|
||||
"The `new' option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When used, both a redundancy mode and a storage engine must be specified.\n\nRedundancy mode:\n single - one copy of the data. Not fault tolerant.\n double - two copies of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - See the Admin Guide.\n three_datacenter - See the Admin Guide.\n multi_dc - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. Must be at least 1, or set to -1 which restores the number of proxies to the default value.\n\nlogs=<LOGS>: Sets the desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information.");
|
||||
helpMap["coordinators"] = CommandHelp(
|
||||
"coordinators auto|<ADDRESS>+ [description=new_cluster_description]",
|
||||
"change cluster coordinators or description",
|
||||
|
@ -504,6 +505,7 @@ void initHelp() {
|
|||
"If no addresses are specified, populates the list of processes which can be killed. Processes cannot be killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is unresponsive.\n\nFor each IP:port pair in <ADDRESS>*, attempt to kill the specified process.");
|
||||
|
||||
hiddenCommands.insert("expensive_data_check");
|
||||
hiddenCommands.insert("datadistribution");
|
||||
}
|
||||
|
||||
void printVersion() {
|
||||
|
@ -1672,7 +1674,18 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
|
|||
state double worstFreeSpaceRatio = 1.0;
|
||||
try {
|
||||
for (auto proc : processesMap.obj()){
|
||||
bool storageServer = false;
|
||||
StatusArray rolesArray = proc.second.get_obj()["roles"].get_array();
|
||||
for (StatusObjectReader role : rolesArray) {
|
||||
if (role["role"].get_str() == "storage") {
|
||||
storageServer = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Skip non-storage servers in free space calculation
|
||||
if (!storageServer)
|
||||
continue;
|
||||
|
||||
StatusObjectReader process(proc.second);
|
||||
std::string addrStr;
|
||||
if (!process.get("address", addrStr)) {
|
||||
|
@ -1681,6 +1694,9 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
|
|||
}
|
||||
NetworkAddress addr = NetworkAddress::parse(addrStr);
|
||||
bool excluded = (process.has("excluded") && process.last().get_bool()) || addressExcluded(exclusions, addr);
|
||||
ssTotalCount++;
|
||||
if (excluded)
|
||||
ssExcludedCount++;
|
||||
|
||||
if(!excluded) {
|
||||
StatusObjectReader disk;
|
||||
|
@ -1703,15 +1719,6 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
|
|||
|
||||
worstFreeSpaceRatio = std::min(worstFreeSpaceRatio, double(free_bytes)/total_bytes);
|
||||
}
|
||||
|
||||
for (StatusObjectReader role : rolesArray) {
|
||||
if (role["role"].get_str() == "storage") {
|
||||
if (excluded)
|
||||
ssExcludedCount++;
|
||||
ssTotalCount++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...) // std::exception
|
||||
|
@ -1895,7 +1902,7 @@ void onoff_generator(const char* text, const char *line, std::vector<std::string
|
|||
}
|
||||
|
||||
void configure_generator(const char* text, const char *line, std::vector<std::string>& lc) {
|
||||
const char* opts[] = {"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "ssd", "ssd-1", "ssd-2", "memory", "proxies=", "logs=", "resolvers=", NULL};
|
||||
const char* opts[] = {"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "multi_dc", "ssd", "ssd-1", "ssd-2", "memory", "proxies=", "logs=", "resolvers=", NULL};
|
||||
array_generator(text, line, opts, lc);
|
||||
}
|
||||
|
||||
|
@ -2210,35 +2217,45 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
state UID randomID = g_random->randomUniqueID();
|
||||
TraceEvent(SevInfo, "CLICommandLog", randomID).detail("command", printable(StringRef(line)));
|
||||
|
||||
bool err, partial;
|
||||
state std::vector<std::vector<StringRef>> parsed = parseLine(line, err, partial);
|
||||
if (err) {
|
||||
LogCommand(line, randomID, "ERROR: malformed escape sequence");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
if (partial) {
|
||||
LogCommand(line, randomID, "ERROR: unterminated quote");
|
||||
is_error = true;
|
||||
continue;
|
||||
bool malformed, partial;
|
||||
state std::vector<std::vector<StringRef>> parsed = parseLine(line, malformed, partial);
|
||||
if (malformed) LogCommand(line, randomID, "ERROR: malformed escape sequence");
|
||||
if (partial) LogCommand(line, randomID, "ERROR: unterminated quote");
|
||||
if (malformed || partial) {
|
||||
if (parsed.size() > 0) {
|
||||
// Denote via a special token that the command was a parse failure.
|
||||
auto& last_command = parsed.back();
|
||||
last_command.insert(last_command.begin(), StringRef((const uint8_t*)"parse_error", strlen("parse_error")));
|
||||
}
|
||||
}
|
||||
|
||||
state bool multi = parsed.size() > 1;
|
||||
is_error = false;
|
||||
|
||||
state std::vector<std::vector<StringRef>>::iterator iter;
|
||||
for (iter = parsed.begin(); iter != parsed.end(); ++iter) {
|
||||
state std::vector<StringRef> tokens = *iter;
|
||||
|
||||
if (opt.exec.present() && is_error) {
|
||||
if (is_error) {
|
||||
printf("WARNING: the previous command failed, the remaining commands will not be executed.\n");
|
||||
return 1;
|
||||
break;
|
||||
}
|
||||
|
||||
is_error = false;
|
||||
|
||||
if (!tokens.size())
|
||||
continue;
|
||||
|
||||
if (tokencmp(tokens[0], "parse_error")) {
|
||||
printf("ERROR: Command failed to completely parse.\n");
|
||||
if (tokens.size() > 1) {
|
||||
printf("ERROR: Not running partial or malformed command:");
|
||||
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t)
|
||||
printf(" %s", formatStringRef(*t, true).c_str());
|
||||
printf("\n");
|
||||
}
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (multi) {
|
||||
printf(">>>");
|
||||
for (auto t = tokens.begin(); t != tokens.end(); ++t)
|
||||
|
@ -2717,6 +2734,25 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (tokencmp(tokens[0], "datadistribution")) {
|
||||
if (tokens.size() != 2) {
|
||||
printf("Usage: datadistribution <on|off>\n");
|
||||
is_error = true;
|
||||
} else {
|
||||
if(tokencmp(tokens[1], "on")) {
|
||||
int _ = wait(setDDMode(db, 1));
|
||||
printf("Data distribution is enabled\n");
|
||||
} else if(tokencmp(tokens[1], "off")) {
|
||||
int _ = wait(setDDMode(db, 0));
|
||||
printf("Data distribution is disabled\n");
|
||||
} else {
|
||||
printf("Usage: datadistribution <on|off>\n");
|
||||
is_error = true;
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tokencmp(tokens[0], "option")) {
|
||||
if (tokens.size() == 2 || tokens.size() > 4) {
|
||||
printUsage(tokens[0]);
|
||||
|
@ -2841,6 +2877,7 @@ ACTOR Future<Void> timeExit(double duration) {
|
|||
|
||||
int main(int argc, char **argv) {
|
||||
platformInit();
|
||||
initSignalSafeUnwind();
|
||||
Error::init();
|
||||
|
||||
registerCrashHandler();
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
#include "flow/flow.h"
|
||||
#include "NativeAPI.h"
|
||||
#include "TaskBucket.h"
|
||||
#include "flow/Notified.h"
|
||||
#include "Notified.h"
|
||||
#include <fdbrpc/IAsyncFile.h>
|
||||
#include "KeyBackedTypes.h"
|
||||
#include <ctime>
|
||||
|
|
|
@ -22,12 +22,20 @@
|
|||
#include "fdbrpc/FailureMonitor.h"
|
||||
#include "ClusterInterface.h"
|
||||
|
||||
struct FailureMonitorClientState : ReferenceCounted<FailureMonitorClientState> {
|
||||
std::set<NetworkAddress> knownAddrs;
|
||||
double serverFailedTimeout;
|
||||
|
||||
FailureMonitorClientState() {
|
||||
serverFailedTimeout = CLIENT_KNOBS->FAILURE_TIMEOUT_DELAY;
|
||||
}
|
||||
};
|
||||
|
||||
ACTOR Future<Void> failureMonitorClientLoop(
|
||||
SimpleFailureMonitor* monitor,
|
||||
ClusterInterface controller,
|
||||
double* pServerFailedTimeout,
|
||||
bool trackMyStatus,
|
||||
std::set<NetworkAddress>* knownAddrs)
|
||||
Reference<FailureMonitorClientState> fmState,
|
||||
bool trackMyStatus)
|
||||
{
|
||||
state Version version = 0;
|
||||
state Future<FailureMonitoringReply> request = Never();
|
||||
|
@ -37,7 +45,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
state double waitfor = 0;
|
||||
|
||||
monitor->setStatus(controller.failureMonitoring.getEndpoint().address, FailureStatus(false));
|
||||
knownAddrs->insert( controller.failureMonitoring.getEndpoint().address );
|
||||
fmState->knownAddrs.insert( controller.failureMonitoring.getEndpoint().address );
|
||||
|
||||
//The cluster controller's address (controller.failureMonitoring.getEndpoint().address) is treated specially because we can declare that it is down independently
|
||||
//of the response from the cluster controller. It still needs to be in knownAddrs in case the cluster controller changes, so the next cluster controller resets its state
|
||||
|
@ -51,14 +59,14 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
requestTimeout = Never();
|
||||
if (reply.allOthersFailed) {
|
||||
// Reset all systems *not* mentioned in the reply to the default (failed) state
|
||||
knownAddrs->erase( controller.failureMonitoring.getEndpoint().address );
|
||||
fmState->knownAddrs.erase( controller.failureMonitoring.getEndpoint().address );
|
||||
std::set<NetworkAddress> changedAddresses;
|
||||
for(int c=0; c<reply.changes.size(); c++)
|
||||
changedAddresses.insert( reply.changes[c].address );
|
||||
for(auto it : *knownAddrs)
|
||||
for(auto it : fmState->knownAddrs)
|
||||
if (!changedAddresses.count( it ))
|
||||
monitor->setStatus( it, FailureStatus() );
|
||||
knownAddrs->clear();
|
||||
fmState->knownAddrs.clear();
|
||||
} else {
|
||||
ASSERT( version != 0 );
|
||||
}
|
||||
|
@ -66,20 +74,20 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
if( monitor->getState( controller.failureMonitoring.getEndpoint() ).isFailed() )
|
||||
TraceEvent("FailureMonitoringServerUp").detail("OldServer",controller.id());
|
||||
monitor->setStatus( controller.failureMonitoring.getEndpoint().address, FailureStatus(false) );
|
||||
knownAddrs->insert( controller.failureMonitoring.getEndpoint().address );
|
||||
fmState->knownAddrs.insert( controller.failureMonitoring.getEndpoint().address );
|
||||
|
||||
//if (version != reply.failureInformationVersion)
|
||||
// printf("Client '%s': update from %lld to %lld (%d changes, aof=%d)\n", g_network->getLocalAddress().toString().c_str(), version, reply.failureInformationVersion, reply.changes.size(), reply.allOthersFailed);
|
||||
|
||||
version = reply.failureInformationVersion;
|
||||
*pServerFailedTimeout = reply.considerServerFailedTimeoutMS * .001;
|
||||
fmState->serverFailedTimeout = reply.considerServerFailedTimeoutMS * .001;
|
||||
for(int c=0; c<reply.changes.size(); c++) {
|
||||
//printf("Client '%s': status of '%s' is now '%s'\n", g_network->getLocalAddress().toString().c_str(), reply.changes[c].address.toString().c_str(), reply.changes[c].status.failed ? "Failed" : "OK");
|
||||
monitor->setStatus( reply.changes[c].address, reply.changes[c].status );
|
||||
if (reply.changes[c].status != FailureStatus())
|
||||
knownAddrs->insert( reply.changes[c].address );
|
||||
fmState->knownAddrs.insert( reply.changes[c].address );
|
||||
else
|
||||
knownAddrs->erase( reply.changes[c].address );
|
||||
fmState->knownAddrs.erase( reply.changes[c].address );
|
||||
ASSERT( reply.changes[c].address != controller.failureMonitoring.getEndpoint().address || !reply.changes[c].status.failed );
|
||||
}
|
||||
before = now();
|
||||
|
@ -91,7 +99,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
requestTimeout = Never();
|
||||
TraceEvent(SevWarn, "FailureMonitoringServerDown").detail("OldServerID",controller.id());
|
||||
monitor->setStatus( controller.failureMonitoring.getEndpoint().address, FailureStatus(true) );
|
||||
knownAddrs->erase( controller.failureMonitoring.getEndpoint().address );
|
||||
fmState->knownAddrs.erase( controller.failureMonitoring.getEndpoint().address );
|
||||
}
|
||||
when( Void _ = wait( nextRequest ) ) {
|
||||
g_network->setCurrentTask(TaskDefaultDelay);
|
||||
|
@ -111,7 +119,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
req.senderStatus = FailureStatus(false);
|
||||
request = controller.failureMonitoring.getReply( req, TaskFailureMonitor );
|
||||
if(!controller.failureMonitoring.getEndpoint().isLocal())
|
||||
requestTimeout = delay( *pServerFailedTimeout, TaskFailureMonitor );
|
||||
requestTimeout = delay( fmState->serverFailedTimeout, TaskFailureMonitor );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -125,11 +133,10 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
|
||||
ACTOR Future<Void> failureMonitorClient( Reference<AsyncVar<Optional<struct ClusterInterface>>> ci, bool trackMyStatus ) {
|
||||
state SimpleFailureMonitor* monitor = static_cast<SimpleFailureMonitor*>( &IFailureMonitor::failureMonitor() );
|
||||
state std::set<NetworkAddress> knownAddrs;
|
||||
state double serverFailedTimeout = CLIENT_KNOBS->FAILURE_TIMEOUT_DELAY;
|
||||
state Reference<FailureMonitorClientState> fmState = Reference<FailureMonitorClientState>(new FailureMonitorClientState());
|
||||
|
||||
loop {
|
||||
state Future<Void> client = ci->get().present() ? failureMonitorClientLoop(monitor, ci->get().get(), &serverFailedTimeout, trackMyStatus, &knownAddrs) : Void();
|
||||
state Future<Void> client = ci->get().present() ? failureMonitorClientLoop(monitor, ci->get().get(), fmState, trackMyStatus) : Void();
|
||||
Void _ = wait( ci->onChange() );
|
||||
}
|
||||
}
|
|
@ -127,6 +127,15 @@ std::map<std::string, std::string> configForToken( std::string const& mode ) {
|
|||
tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "data_hall",
|
||||
IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
|
||||
));
|
||||
} else if(mode == "multi_dc") {
|
||||
redundancy="6";
|
||||
log_replicas="4";
|
||||
storagePolicy = IRepPolicyRef(new PolicyAcross(3, "dcid",
|
||||
IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
|
||||
));
|
||||
tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "dcid",
|
||||
IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
|
||||
));
|
||||
} else
|
||||
redundancySpecified = false;
|
||||
if (redundancySpecified) {
|
||||
|
@ -1133,6 +1142,39 @@ ACTOR Future<vector<AddressExclusion>> getExcludedServers( Database cx ) {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<int> setDDMode( Database cx, int mode ) {
|
||||
state Transaction tr(cx);
|
||||
state int oldMode = -1;
|
||||
state BinaryWriter wr(Unversioned());
|
||||
wr << mode;
|
||||
|
||||
loop {
|
||||
try {
|
||||
Optional<Value> old = wait( tr.get( dataDistributionModeKey ) );
|
||||
if (oldMode < 0) {
|
||||
oldMode = 1;
|
||||
if (old.present()) {
|
||||
BinaryReader rd(old.get(), Unversioned());
|
||||
rd >> oldMode;
|
||||
}
|
||||
}
|
||||
if (!mode) {
|
||||
BinaryWriter wrMyOwner(Unversioned());
|
||||
wrMyOwner << dataDistributionModeLock;
|
||||
tr.set( moveKeysLockOwnerKey, wrMyOwner.toStringRef() );
|
||||
}
|
||||
|
||||
tr.set( dataDistributionModeKey, wr.toStringRef() );
|
||||
|
||||
Void _ = wait( tr.commit() );
|
||||
return oldMode;
|
||||
} catch (Error& e) {
|
||||
TraceEvent("setDDModeRetrying").error(e);
|
||||
Void _ = wait (tr.onError(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> waitForExcludedServers( Database cx, vector<AddressExclusion> excl ) {
|
||||
state std::set<AddressExclusion> exclusions( excl.begin(), excl.end() );
|
||||
|
||||
|
|
|
@ -153,6 +153,8 @@ Future<Void> unlockDatabase( Database const& cx, UID const& id );
|
|||
Future<Void> checkDatabaseLock( Transaction* const& tr, UID const& id );
|
||||
Future<Void> checkDatabaseLock( Reference<ReadYourWritesTransaction> const& tr, UID const& id );
|
||||
|
||||
Future<int> setDDMode( Database const& cx, int const& mode );
|
||||
|
||||
// Gets the cluster connection string
|
||||
Future<std::vector<NetworkAddress>> getCoordinators( Database const& cx );
|
||||
#endif
|
|
@ -33,7 +33,7 @@ struct MasterProxyInterface {
|
|||
RequestStream< struct CommitTransactionRequest > commit;
|
||||
RequestStream< struct GetReadVersionRequest > getConsistentReadVersion; // Returns a version which (1) is committed, and (2) is >= the latest version reported committed (by a commit response) when this request was sent
|
||||
// (at some point between when this request is sent and when its response is received, the latest version reported committed)
|
||||
RequestStream< ReplyPromise<vector<StorageServerInterface>> > getKeyServersLocations;
|
||||
RequestStream< ReplyPromise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>> > getKeyServersLocations;
|
||||
RequestStream< struct GetStorageServerRejoinInfoRequest > getStorageServerRejoinInfo;
|
||||
|
||||
RequestStream<ReplyPromise<Void>> waitFailure;
|
||||
|
|
21
bindings/java/src-completable/test/com/apple/cie/foundationdb/test/OSTest.java → fdbclient/MetricLogger.h
Normal file → Executable file
21
bindings/java/src-completable/test/com/apple/cie/foundationdb/test/OSTest.java → fdbclient/MetricLogger.h
Normal file → Executable file
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* OSTest.java
|
||||
* MetricLogger.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
|
@ -18,21 +18,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.cie.foundationdb.test;
|
||||
#pragma once
|
||||
|
||||
import java.io.InputStream;
|
||||
#include "NativeAPI.h"
|
||||
|
||||
public class OSTest {
|
||||
|
||||
/**
|
||||
* @param args
|
||||
*/
|
||||
public static void main(String[] args) {
|
||||
System.out.println("OS name: " + System.getProperty("os.name"));
|
||||
System.out.println("OS arch: " + System.getProperty("os.arch"));
|
||||
|
||||
InputStream stream = OSTest.class.getResourceAsStream("/lib/linux/amd64/libfdb_java.so");
|
||||
System.out.println("Stream: " + stream);
|
||||
}
|
||||
|
||||
}
|
||||
Future<Void> runMetrics( Future<Database> const& fcx, Key const& metricsPrefix );
|
|
@ -602,7 +602,7 @@ Reference<LocationInfo> DatabaseContext::setCachedLocation( const KeyRangeRef& k
|
|||
attempts++;
|
||||
auto r = locationCache.randomRange();
|
||||
Key begin = r.begin(), end = r.end(); // insert invalidates r, so can't be passed a mere reference into it
|
||||
if( begin >= keyServersPrefix )
|
||||
if( begin >= keyServersPrefix && attempts > maxEvictionAttempts / 2)
|
||||
continue;
|
||||
locationCache.insert( KeyRangeRef(begin, end), Reference<LocationInfo>() );
|
||||
}
|
||||
|
@ -754,8 +754,8 @@ Reference<Cluster> Cluster::createCluster(std::string connFileName, int apiVersi
|
|||
return Reference<Cluster>(new Cluster( rccf, apiVersion));
|
||||
}
|
||||
|
||||
Future<Database> Cluster::createDatabase( Standalone<StringRef> dbName ) {
|
||||
return DatabaseContext::createDatabase( clusterInterface, Reference<Cluster>::addRef( this ), dbName, LocalityData() );
|
||||
Future<Database> Cluster::createDatabase( Standalone<StringRef> dbName, LocalityData locality ) {
|
||||
return DatabaseContext::createDatabase( clusterInterface, Reference<Cluster>::addRef( this ), dbName, locality );
|
||||
}
|
||||
|
||||
Future<Void> Cluster::onConnected() {
|
||||
|
@ -1102,22 +1102,33 @@ ACTOR Future< pair<KeyRange,Reference<LocationInfo>> > getKeyLocation( Database
|
|||
|
||||
state vector<StorageServerInterface> serverInterfaces;
|
||||
state KeyRangeRef range;
|
||||
|
||||
|
||||
// We assume that not only /FF/keyServers but /FF/serverList is present on the keyServersLocations since we now need both of them to terminate our search. Currently this is guaranteed because nothing after /FF/keyServers is split.
|
||||
if ( ( key.startsWith( serverListPrefix) && (!isBackward || key.size() > serverListPrefix.size()) ) ||
|
||||
( key.startsWith( keyServersPrefix ) && (!isBackward || key.size() > keyServersPrefix.size()) )) {
|
||||
if( info.debugID.present() )
|
||||
g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.Before");
|
||||
g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.Before");
|
||||
loop {
|
||||
choose {
|
||||
when ( Void _ = wait( cx->onMasterProxiesChanged() ) ) {}
|
||||
when ( vector<StorageServerInterface> s = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getKeyServersLocations, ReplyPromise<vector<StorageServerInterface>>(), info.taskID ) ) ) {
|
||||
when ( vector<pair<KeyRangeRef, vector<StorageServerInterface>>> keyServersShards = wait( loadBalance( cx->getMasterProxies(), &MasterProxyInterface::getKeyServersLocations, ReplyPromise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>>(), info.taskID ) ) ) {
|
||||
if( info.debugID.present() )
|
||||
g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.After");
|
||||
ASSERT( s.size() ); // There should always be storage servers, except on version 0 which should not get to this function
|
||||
range = KeyRangeRef( keyServersPrefix, allKeys.end );
|
||||
serverInterfaces = s;
|
||||
break;
|
||||
ASSERT( keyServersShards.size() ); // There should always be storage servers, except on version 0 which should not get to this function
|
||||
|
||||
Reference<LocationInfo> cachedLocation;
|
||||
for (pair<KeyRangeRef, vector<StorageServerInterface>> keyServersShard : keyServersShards) {
|
||||
auto locationInfo = cx->setCachedLocation(keyServersShard.first, keyServersShard.second);
|
||||
|
||||
if (isBackward ? (keyServersShard.first.begin < key && keyServersShard.first.end >= key) : keyServersShard.first.contains(key)) {
|
||||
range = keyServersShard.first;
|
||||
cachedLocation = locationInfo;
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(isBackward ? (range.begin < key && range.end >= key) : range.contains(key));
|
||||
|
||||
return make_pair(range, cachedLocation);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1654,6 +1665,15 @@ Future<Key> resolveKey( Database const& cx, KeySelector const& key, Version cons
|
|||
ACTOR Future<Standalone<RangeResultRef>> getRangeFallback( Database cx, Version version,
|
||||
KeySelector begin, KeySelector end, GetRangeLimits limits, bool reverse, TransactionInfo info )
|
||||
{
|
||||
if(version == latestVersion) {
|
||||
state Transaction transaction(cx);
|
||||
transaction.setOption(FDBTransactionOptions::CAUSAL_READ_RISKY);
|
||||
transaction.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
transaction.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
||||
Version ver = wait( transaction.getReadVersion() );
|
||||
version = ver;
|
||||
}
|
||||
|
||||
Future<Key> fb = resolveKey(cx, begin, version, info);
|
||||
state Future<Key> fe = resolveKey(cx, end, version, info);
|
||||
|
||||
|
@ -1849,15 +1869,8 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Future<Version>
|
|||
cx->invalidateCache( beginServer.second );
|
||||
|
||||
if (e.code() == error_code_wrong_shard_server) {
|
||||
if (version == latestVersion) {
|
||||
// latestVersion queries are only for keyServersPrefix/*, which shard is guaranteed not to split,
|
||||
// so we should always be able to use the fast path--try again
|
||||
TEST(true); //Latest version retry fast path
|
||||
TraceEvent("LatestVersionRetryFastPath").detail("KeyBegin", printable(begin.getKey())).detail("KeyEnd", printable(end.getKey()));
|
||||
} else {
|
||||
Standalone<RangeResultRef> result = wait( getRangeFallback(cx, version, originalBegin, originalEnd, originalLimits, reverse, info ) );
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
Void _ = wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
|
||||
|
|
|
@ -110,7 +110,7 @@ public:
|
|||
static Reference<Cluster> createCluster(std::string connFileName, int apiVersion);
|
||||
|
||||
// See DatabaseContext::createDatabase
|
||||
Future<Database> createDatabase( Standalone<StringRef> dbName );
|
||||
Future<Database> createDatabase( Standalone<StringRef> dbName, LocalityData locality = LocalityData() );
|
||||
|
||||
void setOption(FDBClusterOptions::Option option, Optional<StringRef> value);
|
||||
|
||||
|
|
|
@ -18,11 +18,11 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FLOW_NOTIFIED_H
|
||||
#define FLOW_NOTIFIED_H
|
||||
#ifndef FDBCLIENT_NOTIFIED_H
|
||||
#define FDBCLIENT_NOTIFIED_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "FDBTypes.h"
|
||||
#include "flow/TDMetric.actor.h"
|
||||
|
||||
struct NotifiedVersion {
|
||||
|
@ -78,4 +78,4 @@ private:
|
|||
VersionMetricHandle val;
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif
|
|
@ -33,7 +33,8 @@ const KeyRef afterAllKeys = LiteralStringRef("\xff\xff\x00");
|
|||
const KeyRangeRef keyServersKeys( LiteralStringRef("\xff/keyServers/"), LiteralStringRef("\xff/keyServers0") );
|
||||
const KeyRef keyServersPrefix = keyServersKeys.begin;
|
||||
const KeyRef keyServersEnd = keyServersKeys.end;
|
||||
const KeyRef keyServersKeyServersKey = LiteralStringRef("\xff/keyServers/\xff/keyServers/");
|
||||
const KeyRangeRef keyServersKeyServersKeys ( LiteralStringRef("\xff/keyServers/\xff/keyServers/"), LiteralStringRef("\xff/keyServers/\xff/keyServers0"));
|
||||
const KeyRef keyServersKeyServersKey = keyServersKeyServersKeys.begin;
|
||||
|
||||
const Key keyServersKey( const KeyRef& k ) {
|
||||
return k.withPrefix( keyServersPrefix );
|
||||
|
|
|
@ -34,7 +34,7 @@ extern const KeyRangeRef allKeys; // '' to systemKeys.end
|
|||
extern const KeyRef afterAllKeys;
|
||||
|
||||
// "\xff/keyServers/[[begin]]" := "[[vector<serverID>, vector<serverID>]]"
|
||||
extern const KeyRangeRef keyServersKeys;
|
||||
extern const KeyRangeRef keyServersKeys, keyServersKeyServersKeys;
|
||||
extern const KeyRef keyServersPrefix, keyServersEnd, keyServersKeyServersKey;
|
||||
const Key keyServersKey( const KeyRef& k );
|
||||
const KeyRef keyServersKey( const KeyRef& k, Arena& arena );
|
||||
|
|
|
@ -38,6 +38,8 @@
|
|||
<EnableCompile Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">false</EnableCompile>
|
||||
<EnableCompile Condition="'$(Configuration)|$(Platform)'=='Release|X64'">false</EnableCompile>
|
||||
</ActorCompiler>
|
||||
<ClInclude Include="MetricLogger.h" />
|
||||
<ActorCompiler Include="MetricLogger.actor.cpp" />
|
||||
<ClInclude Include="FailureMonitorClient.h" />
|
||||
<ClInclude Include="FDBOptions.g.h" />
|
||||
<ClInclude Include="FDBOptions.h" />
|
||||
|
@ -57,6 +59,7 @@
|
|||
<ClInclude Include="MultiVersionTransaction.h" />
|
||||
<ClInclude Include="MutationList.h" />
|
||||
<ClInclude Include="NativeAPI.h" />
|
||||
<ClInclude Include="Notified.h" />
|
||||
<ClInclude Include="ReadYourWrites.h" />
|
||||
<ActorCompiler Include="RunTransaction.actor.h" />
|
||||
<ClInclude Include="RYWIterator.h" />
|
||||
|
@ -201,4 +204,4 @@
|
|||
<Target Name="MyPreCompileSteps" AfterTargets="CLCompile">
|
||||
<Exec Command=""$(SolutionDir)bin\$(Configuration)\coveragetool.exe" "$(OutDir)coverage.$(TargetName).xml" @(ActorCompiler -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLInclude -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLCompile -> '%(RelativeDir)%(Filename)%(Extension)', ' ')" />
|
||||
</Target>
|
||||
</Project>
|
||||
</Project>
|
||||
|
|
|
@ -94,7 +94,7 @@ description is not currently required but encouraged.
|
|||
<Option name="disable_client_statistics_logging" code="70"
|
||||
description="Disables logging of client statistics, such as sampled transaction activity." />
|
||||
<Option name="enable_slow_task_profiling" code="71"
|
||||
description="Enables slow task profiling. Requires trace logging to be enabled." />
|
||||
description="Enables debugging feature to perform slow task profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production." />
|
||||
<Option name="supported_client_versions" code="1000"
|
||||
paramType="String" paramDescription="[release version],[source version],[protocol version];..."
|
||||
description="This option is set automatically to communicate the list of supported clients to the active client."
|
||||
|
|
|
@ -253,7 +253,8 @@ public:
|
|||
uint32_t restart_delay_reset_interval;
|
||||
double last_start;
|
||||
bool quiet;
|
||||
bool delete_wd40_env;
|
||||
//bool delete_wd40_env;
|
||||
const char *delete_envvars;
|
||||
bool deconfigured;
|
||||
bool kill_on_configuration_change;
|
||||
|
||||
|
@ -261,7 +262,7 @@ public:
|
|||
int pipes[2][2];
|
||||
|
||||
Command() : argv(NULL) { }
|
||||
Command(const CSimpleIni& ini, std::string _section, uint64_t id, fdb_fd_set fds, int* maxfd) : section(_section), argv(NULL), quiet(false), delete_wd40_env(false), fds(fds), deconfigured(false), kill_on_configuration_change(true) {
|
||||
Command(const CSimpleIni& ini, std::string _section, uint64_t id, fdb_fd_set fds, int* maxfd) : section(_section), argv(NULL), quiet(false), delete_envvars(NULL), fds(fds), deconfigured(false), kill_on_configuration_change(true) {
|
||||
char _ssection[strlen(section.c_str()) + 22];
|
||||
snprintf(_ssection, strlen(section.c_str()) + 22, "%s.%llu", section.c_str(), id);
|
||||
ssection = _ssection;
|
||||
|
@ -351,10 +352,8 @@ public:
|
|||
if (q && !strcmp(q, "true"))
|
||||
quiet = true;
|
||||
|
||||
const char* dwe = get_value_multi(ini, "delete_wd40_env", ssection.c_str(), section.c_str(), "general", NULL);
|
||||
if(dwe && !strcmp(dwe, "true")) {
|
||||
delete_wd40_env = true;
|
||||
}
|
||||
const char* del_env = get_value_multi(ini, "delete_envvars", ssection.c_str(), section.c_str(), "general", NULL);
|
||||
delete_envvars = del_env;
|
||||
|
||||
const char* kocc = get_value_multi(ini, "kill_on_configuration_change", ssection.c_str(), section.c_str(), "general", NULL);
|
||||
if(kocc && strcmp(kocc, "true")) {
|
||||
|
@ -373,7 +372,7 @@ public:
|
|||
|
||||
for (auto i : keys) {
|
||||
if (!strcmp(i.pItem, "command") || !strcmp(i.pItem, "restart_delay") || !strcmp(i.pItem, "initial_restart_delay") || !strcmp(i.pItem, "restart_backoff") ||
|
||||
!strcmp(i.pItem, "restart_delay_reset_interval") || !strcmp(i.pItem, "disable_lifecycle_logging") || !strcmp(i.pItem, "delete_wd40_env") ||
|
||||
!strcmp(i.pItem, "restart_delay_reset_interval") || !strcmp(i.pItem, "disable_lifecycle_logging") || !strcmp(i.pItem, "delete_envvars") ||
|
||||
!strcmp(i.pItem, "kill_on_configuration_change"))
|
||||
{
|
||||
continue;
|
||||
|
@ -408,7 +407,7 @@ public:
|
|||
}
|
||||
void update(const Command& other) {
|
||||
quiet = other.quiet;
|
||||
delete_wd40_env = other.delete_wd40_env;
|
||||
delete_envvars = other.delete_envvars;
|
||||
initial_restart_delay = other.initial_restart_delay;
|
||||
max_restart_delay = other.max_restart_delay;
|
||||
restart_backoff = other.restart_backoff;
|
||||
|
@ -474,12 +473,21 @@ void start_process(Command* cmd, uint64_t id, uid_t uid, gid_t gid, int delay, s
|
|||
signal(SIGINT, SIG_DFL);
|
||||
signal(SIGTERM, SIG_DFL);
|
||||
|
||||
if(cmd->delete_wd40_env) {
|
||||
/* remove WD40 environment variables */
|
||||
if(unsetenv("WD40_BV") || unsetenv("WD40_IS_MY_DADDY") || unsetenv("CONF_BUILD_VERSION")) {
|
||||
log_err("unsetenv", errno, "Failed to remove parent environment variables");
|
||||
exit(1);
|
||||
}
|
||||
if(cmd->delete_envvars != NULL && std::strlen(cmd->delete_envvars) > 0) {
|
||||
std::string vars(cmd->delete_envvars);
|
||||
size_t start = 0;
|
||||
do {
|
||||
size_t bound = vars.find(" ", start);
|
||||
std::string var = vars.substr(start, bound - start);
|
||||
log_msg(LOG_INFO, "Deleting parent environment variable: \"%s\"\n", var.c_str());
|
||||
if(unsetenv(var.c_str())) {
|
||||
log_err("unsetenv", errno, "Failed to remove parent environment variable: %s\n", var.c_str());
|
||||
exit(1);
|
||||
}
|
||||
start = bound;
|
||||
while(vars[start] == ' ')
|
||||
start++;
|
||||
} while(start <= vars.length());
|
||||
}
|
||||
|
||||
dup2( cmd->pipes[0][1], fileno(stdout) );
|
||||
|
|
|
@ -504,7 +504,7 @@ private:
|
|||
}
|
||||
|
||||
void setIOTimeout(double timeout) {
|
||||
ioTimeout = timeout;
|
||||
ioTimeout = fabs(timeout);
|
||||
timeoutWarnOnly = timeout < 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -213,8 +213,9 @@ public:
|
|||
//If we are in the process of deleting a file, we can't let someone else modify it at the same time. We therefore block the creation of new files until deletion is complete
|
||||
state std::map<std::string, Future<Void>>::iterator deletedFile = filesBeingDeleted.find(filename);
|
||||
if(deletedFile != filesBeingDeleted.end()) {
|
||||
//TraceEvent("AsyncFileNonDurableOpenWaitOnDelete").detail("Filename", filename);
|
||||
//TraceEvent("AsyncFileNonDurableOpenWaitOnDelete1").detail("Filename", filename);
|
||||
Void _ = wait( deletedFile->second || shutdown );
|
||||
//TraceEvent("AsyncFileNonDurableOpenWaitOnDelete2").detail("Filename", filename);
|
||||
if(shutdown.isReady())
|
||||
throw io_error().asInjectedFault();
|
||||
}
|
||||
|
@ -711,35 +712,44 @@ private:
|
|||
|
||||
//Finishes all outstanding actors on an AsyncFileNonDurable and then deletes it
|
||||
ACTOR Future<Void> deleteFile(AsyncFileNonDurable *self) {
|
||||
//We must run on the main thread (instead of a SQLite coroutine). We don't want to signal any promises from a coroutine, so we switch at the beginning
|
||||
//of this ACTOR
|
||||
Void _ = wait(self->returnToMainThread());
|
||||
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
|
||||
state int currentTaskID = g_network->getCurrentTask();
|
||||
state std::string filename = self->filename;
|
||||
|
||||
//Make sure all writes have gone through.
|
||||
Promise<bool> startSyncPromise = self->startSyncPromise;
|
||||
self->startSyncPromise = Promise<bool>();
|
||||
startSyncPromise.send(true);
|
||||
Void _ = wait( g_simulator.onMachine( currentProcess ) );
|
||||
try {
|
||||
//Make sure all writes have gone through.
|
||||
Promise<bool> startSyncPromise = self->startSyncPromise;
|
||||
self->startSyncPromise = Promise<bool>();
|
||||
startSyncPromise.send(true);
|
||||
|
||||
std::vector<Future<Void>> outstandingModifications;
|
||||
std::vector<Future<Void>> outstandingModifications;
|
||||
|
||||
for(auto itr = self->pendingModifications.ranges().begin(); itr != self->pendingModifications.ranges().end(); ++itr)
|
||||
if(itr->value().isValid() && !itr->value().isReady())
|
||||
outstandingModifications.push_back(itr->value());
|
||||
for(auto itr = self->pendingModifications.ranges().begin(); itr != self->pendingModifications.ranges().end(); ++itr)
|
||||
if(itr->value().isValid() && !itr->value().isReady())
|
||||
outstandingModifications.push_back(itr->value());
|
||||
|
||||
//Ignore errors here so that all modifications can finish
|
||||
Void _ = wait(waitForAllReady(outstandingModifications));
|
||||
//Ignore errors here so that all modifications can finish
|
||||
Void _ = wait(waitForAllReady(outstandingModifications));
|
||||
|
||||
//Make sure we aren't in the process of killing the file
|
||||
if(self->killed.isSet())
|
||||
Void _ = wait(self->killComplete.getFuture());
|
||||
//Make sure we aren't in the process of killing the file
|
||||
if(self->killed.isSet())
|
||||
Void _ = wait(self->killComplete.getFuture());
|
||||
|
||||
//Remove this file from the filesBeingDeleted map so that new files can be created with this filename
|
||||
g_simulator.getMachineByNetworkAddress( self->openedAddress )->closingFiles.erase(self->getFilename());
|
||||
AsyncFileNonDurable::filesBeingDeleted.erase(self->filename);
|
||||
//TraceEvent("AsyncFileNonDurable_FinishDelete", self->id).detail("Filename", self->filename);
|
||||
//Remove this file from the filesBeingDeleted map so that new files can be created with this filename
|
||||
g_simulator.getMachineByNetworkAddress( self->openedAddress )->closingFiles.erase(self->getFilename());
|
||||
g_simulator.getMachineByNetworkAddress( self->openedAddress )->deletingFiles.erase(self->getFilename());
|
||||
AsyncFileNonDurable::filesBeingDeleted.erase(self->filename);
|
||||
//TraceEvent("AsyncFileNonDurable_FinishDelete", self->id).detail("Filename", self->filename);
|
||||
|
||||
delete self;
|
||||
return Void();
|
||||
delete self;
|
||||
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
|
||||
return Void();
|
||||
} catch( Error &e ) {
|
||||
state Error err = e;
|
||||
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -20,6 +20,10 @@
|
|||
|
||||
#include "IAsyncFile.h"
|
||||
|
||||
#if VALGRIND
|
||||
#include <memcheck.h>
|
||||
#endif
|
||||
|
||||
class AsyncFileWriteChecker : public IAsyncFile, public ReferenceCounted<AsyncFileWriteChecker> {
|
||||
public:
|
||||
void addref() { ReferenceCounted<AsyncFileWriteChecker>::addref(); }
|
||||
|
@ -73,7 +77,13 @@ public:
|
|||
private:
|
||||
Reference<IAsyncFile> m_f;
|
||||
|
||||
std::vector<uint32_t> checksumHistory;
|
||||
struct WriteInfo {
|
||||
WriteInfo() : checksum(0), timestamp(0) {}
|
||||
uint32_t checksum;
|
||||
uint32_t timestamp;
|
||||
};
|
||||
|
||||
std::vector<WriteInfo> checksumHistory;
|
||||
// This is the most page checksum history blocks we will use across all files.
|
||||
static int checksumHistoryBudget;
|
||||
static int checksumHistoryPageSize;
|
||||
|
@ -106,24 +116,31 @@ private:
|
|||
}
|
||||
|
||||
while(page < pageEnd) {
|
||||
//printf("%d %d %u %u\n", write, page, checksum, historySum);
|
||||
uint32_t checksum = hashlittle(start, checksumHistoryPageSize, 0xab12fd93);
|
||||
uint32_t &historySum = checksumHistory[page];
|
||||
WriteInfo &history = checksumHistory[page];
|
||||
//printf("%d %d %u %u\n", write, page, checksum, history.checksum);
|
||||
|
||||
#if VALGRIND
|
||||
// It's possible we'll read or write a page where not all of the data is defined, but the checksum of the page is still valid
|
||||
VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&checksum, sizeof(uint32_t));
|
||||
#endif
|
||||
|
||||
// For writes, just update the stored sum
|
||||
if(write) {
|
||||
historySum = checksum;
|
||||
history.timestamp = (uint32_t)now();
|
||||
history.checksum = checksum;
|
||||
}
|
||||
else {
|
||||
if(historySum != 0 && historySum != checksum) {
|
||||
if(history.checksum != 0 && history.checksum != checksum) {
|
||||
// For reads, verify the stored sum if it is not 0. If it fails, clear it.
|
||||
TraceEvent (SevError, "AsyncFileKAIODetectedLostWrite")
|
||||
TraceEvent (SevError, "AsyncFileLostWriteDetected")
|
||||
.detail("Filename", m_f->getFilename())
|
||||
.detail("PageNumber", page)
|
||||
.detail("ChecksumOfPage", checksum)
|
||||
.detail("ChecksumHistory", historySum)
|
||||
.detail("ChecksumHistory", history.checksum)
|
||||
.detail("LastWriteTime", history.timestamp)
|
||||
.error(checksum_failed());
|
||||
historySum = 0;
|
||||
history.checksum = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -344,10 +344,10 @@ TEST_CASE("flow/flow/quorum")
|
|||
vector<Future<int>> fs;
|
||||
vector<Future<Void>> qs;
|
||||
for (auto& p : ps) fs.push_back(p.getFuture());
|
||||
|
||||
|
||||
for (int i = 0; i <= ps.size(); i++)
|
||||
qs.push_back( quorum(fs, i) );
|
||||
|
||||
|
||||
for (int i = 0; i < ps.size(); i++) {
|
||||
ASSERT(qs[i].isReady());
|
||||
ASSERT(!qs[i + 1].isReady());
|
||||
|
@ -357,7 +357,7 @@ TEST_CASE("flow/flow/quorum")
|
|||
return Void();
|
||||
}
|
||||
|
||||
TEST_CASE("flow/flow/trivial futures")
|
||||
TEST_CASE("flow/flow/trivial futures")
|
||||
{
|
||||
Future<int> invalid;
|
||||
ASSERT(!invalid.isValid());
|
||||
|
@ -499,7 +499,7 @@ TEST_CASE("flow/flow/promisestream callbacks")
|
|||
onReady(p.getFuture(), [&result](int x) { result = x; }, [&result](Error e){ result = -1; });
|
||||
|
||||
ASSERT(result == 0);
|
||||
|
||||
|
||||
p = PromiseStream<int>();
|
||||
|
||||
ASSERT(result == -1);
|
||||
|
@ -989,7 +989,7 @@ TEST_CASE("flow/flow/perf/actor patterns")
|
|||
ASSERT(out2[i].isReady());
|
||||
}
|
||||
printf("2xcheeseActor(chooseTwoActor(cheeseActor(fifo), never)): %0.2f M/sec\n", N / 1e6 / (timer() - start));
|
||||
printf("sizeof(CheeseWaitActorActor) == %d\n", sizeof(CheeseWaitActorActor));
|
||||
printf("sizeof(CheeseWaitActorActor) == %lu\n", sizeof(CheeseWaitActorActor));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -1140,11 +1140,11 @@ TEST_CASE("flow/flow/YieldedAsyncMap/cancel2")
|
|||
state Future<Void> y2 = yam.onChange(2);
|
||||
|
||||
auto* pyam = &yam;
|
||||
uncancellable(trigger(
|
||||
uncancellable(trigger(
|
||||
[pyam](){
|
||||
printf("Triggered\n");
|
||||
pyam->triggerAll();
|
||||
},
|
||||
pyam->triggerAll();
|
||||
},
|
||||
delay(1)));
|
||||
|
||||
Void _ = wait(y1);
|
||||
|
@ -1191,4 +1191,4 @@ TEST_CASE("fdbrpc/flow/wait_expression_after_cancel")
|
|||
f.cancel();
|
||||
ASSERT( a == 1 );
|
||||
return Void();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -137,7 +137,7 @@ class TransportData {
|
|||
public:
|
||||
TransportData(uint64_t transportId)
|
||||
: endpointNotFoundReceiver(endpoints),
|
||||
pingReceiver(endpoints),
|
||||
pingReceiver(endpoints),
|
||||
warnAlwaysForLargePacket(true),
|
||||
lastIncompatibleMessage(0),
|
||||
transportId(transportId)
|
||||
|
@ -204,7 +204,9 @@ struct ConnectPacket {
|
|||
static_assert( sizeof(ConnectPacket) == CONNECT_PACKET_V2_SIZE, "ConnectPacket packed incorrectly" );
|
||||
#pragma pack( pop )
|
||||
|
||||
static Future<Void> connectionReader( TransportData* const& transport, Reference<IConnection> const& conn, bool const& isOutgoing, Promise<NetworkAddress> const& onPeerAddress );
|
||||
static Future<Void> connectionReader( TransportData* const& transport, Reference<IConnection> const& conn, Peer* const& peer, Promise<Peer*> const& onConnected );
|
||||
|
||||
static PacketID sendPacket( TransportData* self, ISerializeSource const& what, const Endpoint& destination, bool reliable );
|
||||
|
||||
struct Peer : NonCopyable {
|
||||
// FIXME: Peers don't die!
|
||||
|
@ -215,12 +217,14 @@ struct Peer : NonCopyable {
|
|||
ReliablePacketList reliable;
|
||||
AsyncTrigger dataToSend; // Triggered when unsent.empty() becomes false
|
||||
Future<Void> connect;
|
||||
AsyncVar<bool> incompatibleDataRead;
|
||||
bool compatible;
|
||||
bool outgoingConnectionIdle; // We don't actually have a connection open and aren't trying to open one because we don't have anything to send
|
||||
double lastConnectTime;
|
||||
double reconnectionDelay;
|
||||
|
||||
explicit Peer( TransportData* transport, NetworkAddress const& destination, bool doConnect = true )
|
||||
: transport(transport), destination(destination), outgoingConnectionIdle(!doConnect), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME)
|
||||
: transport(transport), destination(destination), outgoingConnectionIdle(!doConnect), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true)
|
||||
{
|
||||
if(doConnect) {
|
||||
connect = connectionKeeper(this);
|
||||
|
@ -293,8 +297,7 @@ struct Peer : NonCopyable {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> connectionMonitor( Peer* peer ) {
|
||||
|
||||
ACTOR static Future<Void> connectionMonitor( Peer *peer ) {
|
||||
state RequestStream< ReplyPromise<Void> > remotePing( Endpoint( peer->destination, WLTOKEN_PING_PACKET ) );
|
||||
|
||||
loop {
|
||||
|
@ -305,9 +308,11 @@ struct Peer : NonCopyable {
|
|||
state ReplyPromise<Void> reply;
|
||||
FlowTransport::transport().sendUnreliable( SerializeSource<ReplyPromise<Void>>(reply), remotePing.getEndpoint() );
|
||||
|
||||
peer->incompatibleDataRead.set(false);
|
||||
choose {
|
||||
when (Void _ = wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) { TraceEvent("ConnectionTimeout").detail("WithAddr", peer->destination); throw connection_failed(); }
|
||||
when (Void _ = wait( reply.getFuture() )) {}
|
||||
when (Void _ = wait( peer->incompatibleDataRead.onChange())) {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -364,18 +369,17 @@ struct Peer : NonCopyable {
|
|||
Reference<IConnection> _conn = wait( timeout( INetworkConnections::net()->connect(self->destination), FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT, Reference<IConnection>() ) );
|
||||
if (_conn) {
|
||||
conn = _conn;
|
||||
TraceEvent("ConnEstablishedTo", conn->getDebugID()).detail("PeerAddr", self->destination);
|
||||
TraceEvent("ConnectionExchangingConnectPacket", conn->getDebugID()).detail("PeerAddr", self->destination);
|
||||
self->prependConnectPacket();
|
||||
} else {
|
||||
TraceEvent("ConnTimedOut", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination);
|
||||
TraceEvent("ConnectionTimedOut", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination);
|
||||
throw connection_failed();
|
||||
}
|
||||
|
||||
reader = connectionReader( self->transport, conn, true, Promise<NetworkAddress>() );
|
||||
reader = connectionReader( self->transport, conn, self, Promise<Peer*>());
|
||||
} else {
|
||||
self->outgoingConnectionIdle = false;
|
||||
}
|
||||
self->transport->countConnEstablished++;
|
||||
|
||||
Void _ = wait( connectionWriter( self, conn ) || reader || connectionMonitor(self) );
|
||||
|
||||
|
@ -389,12 +393,17 @@ struct Peer : NonCopyable {
|
|||
self->discardUnreliablePackets();
|
||||
reader = Future<Void>();
|
||||
bool ok = e.code() == error_code_connection_failed || e.code() == error_code_actor_cancelled || ( g_network->isSimulated() && e.code() == error_code_checksum_failed );
|
||||
TraceEvent(ok ? SevInfo : SevError, "ConnectionClosed", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).error(e, true);
|
||||
|
||||
if (ok)
|
||||
self->transport->countConnClosedWithoutError++;
|
||||
else
|
||||
self->transport->countConnClosedWithError++;
|
||||
if(self->compatible) {
|
||||
TraceEvent(ok ? SevInfo : SevError, "ConnectionClosed", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).error(e, true);
|
||||
if (ok)
|
||||
self->transport->countConnClosedWithoutError++;
|
||||
else
|
||||
self->transport->countConnClosedWithError++;
|
||||
}
|
||||
else {
|
||||
TraceEvent(ok ? SevInfo : SevError, "IncompatibleConnectionClosed", conn ? conn->getDebugID() : UID()).detail("PeerAddr", self->destination).error(e, true);
|
||||
}
|
||||
|
||||
if (conn) {
|
||||
conn->close();
|
||||
|
@ -408,8 +417,6 @@ struct Peer : NonCopyable {
|
|||
}
|
||||
};
|
||||
|
||||
static PacketID sendPacket( TransportData* self, ISerializeSource const& what, const Endpoint& destination, bool reliable );
|
||||
|
||||
ACTOR static void deliver( TransportData* self, Endpoint destination, ArenaReader reader, bool inReadSocket ) {
|
||||
int priority = self->endpoints.getPriority(destination.token);
|
||||
if (priority < TaskReadSocket || !inReadSocket) {
|
||||
|
@ -533,7 +540,9 @@ static void scanPackets( TransportData* transport, uint8_t*& unprocessed_begin,
|
|||
ACTOR static Future<Void> connectionReader(
|
||||
TransportData* transport,
|
||||
Reference<IConnection> conn,
|
||||
bool isOutgoing, Promise<NetworkAddress> onPeerAddress ) {
|
||||
Peer *peer,
|
||||
Promise<Peer*> onConnected)
|
||||
{
|
||||
// This actor exists whenever there is an open or opening connection, whether incoming or outgoing
|
||||
// For incoming connections conn is set and peer is initially NULL; for outgoing connections it is the reverse
|
||||
|
||||
|
@ -542,12 +551,14 @@ ACTOR static Future<Void> connectionReader(
|
|||
state uint8_t* unprocessed_end = NULL;
|
||||
state uint8_t* buffer_end = NULL;
|
||||
state bool expectConnectPacket = true;
|
||||
state bool compatible = false;
|
||||
state NetworkAddress peerAddress;
|
||||
state uint64_t peerProtocolVersion = 0;
|
||||
|
||||
peerAddress = conn->getPeerAddress();
|
||||
if (!isOutgoing)
|
||||
if (peer == nullptr) {
|
||||
ASSERT( !peerAddress.isPublic() );
|
||||
}
|
||||
|
||||
loop {
|
||||
loop {
|
||||
|
@ -592,7 +603,8 @@ ACTOR static Future<Void> connectionReader(
|
|||
.detail("LocalVersion", currentProtocolVersion)
|
||||
.detail("RejectedVersion", p->protocolVersion)
|
||||
.detail("VersionMask", compatibleProtocolVersionMask)
|
||||
.detail("Peer", p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress());
|
||||
.detail("Peer", p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress())
|
||||
.detail("ConnectionId", connectionId);
|
||||
transport->lastIncompatibleMessage = now();
|
||||
}
|
||||
if(!transport->incompatiblePeers.count(addr)) {
|
||||
|
@ -601,7 +613,20 @@ ACTOR static Future<Void> connectionReader(
|
|||
} else if(connectionId > 1) {
|
||||
transport->multiVersionConnections[connectionId] = now() + FLOW_KNOBS->CONNECTION_ID_TIMEOUT;
|
||||
}
|
||||
throw incompatible_protocol_version();
|
||||
|
||||
compatible = false;
|
||||
if(p->protocolVersion < 0x0FDB00A551000000LL) {
|
||||
// Older versions expected us to hang up. It may work even if we don't hang up here, but it's safer to keep the old behavior.
|
||||
throw incompatible_protocol_version();
|
||||
}
|
||||
}
|
||||
else {
|
||||
compatible = true;
|
||||
TraceEvent("ConnectionEstablished", conn->getDebugID())
|
||||
.detail("Peer", conn->getPeerAddress())
|
||||
.detail("ConnectionId", connectionId);
|
||||
|
||||
transport->countConnEstablished++;
|
||||
}
|
||||
|
||||
if(connectionId > 1) {
|
||||
|
@ -611,21 +636,29 @@ ACTOR static Future<Void> connectionReader(
|
|||
expectConnectPacket = false;
|
||||
|
||||
peerProtocolVersion = p->protocolVersion;
|
||||
if (isOutgoing) {
|
||||
if (peer != nullptr) {
|
||||
// Outgoing connection; port information should be what we expect
|
||||
TraceEvent("ConnectedOutgoing").detail("PeerAddr", NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) );
|
||||
peer->compatible = compatible;
|
||||
ASSERT( p->canonicalRemotePort == peerAddress.port );
|
||||
} else {
|
||||
if (p->canonicalRemotePort) {
|
||||
peerAddress = NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort, true, peerAddress.isTLS() );
|
||||
}
|
||||
onPeerAddress.send( peerAddress );
|
||||
peer = transport->getPeer(peerAddress);
|
||||
peer->compatible = compatible;
|
||||
onConnected.send( peer );
|
||||
Void _ = wait( delay(0) ); // Check for cancellation
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!expectConnectPacket)
|
||||
if (compatible) {
|
||||
scanPackets( transport, unprocessed_begin, unprocessed_end, arena, peerAddress, peerProtocolVersion );
|
||||
}
|
||||
else if(!expectConnectPacket) {
|
||||
unprocessed_begin = unprocessed_end;
|
||||
peer->incompatibleDataRead.set(true);
|
||||
}
|
||||
|
||||
if (readWillBlock)
|
||||
break;
|
||||
|
@ -640,12 +673,11 @@ ACTOR static Future<Void> connectionReader(
|
|||
|
||||
ACTOR static Future<Void> connectionIncoming( TransportData* self, Reference<IConnection> conn ) {
|
||||
try {
|
||||
state Promise<NetworkAddress> onPeerAddress;
|
||||
state Future<Void> reader = connectionReader( self, conn, false, onPeerAddress );
|
||||
state Promise<Peer*> onConnected;
|
||||
state Future<Void> reader = connectionReader( self, conn, nullptr, onConnected );
|
||||
choose {
|
||||
when( Void _ = wait( reader ) ) { ASSERT(false); return Void(); }
|
||||
when( NetworkAddress pa = wait( onPeerAddress.getFuture() ) ) {
|
||||
Peer* p = self->getPeer( pa, false );
|
||||
when( Peer *p = wait( onConnected.getFuture() ) ) {
|
||||
p->onIncomingConnection( conn, reader );
|
||||
}
|
||||
when( Void _ = wait( delayJittered(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT) ) ) {
|
||||
|
@ -785,9 +817,9 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
|
|||
|
||||
Peer* peer = self->getPeer(destination.address);
|
||||
|
||||
// If there isn't an open connection or public address, we can't send
|
||||
if (peer->outgoingConnectionIdle && !destination.address.isPublic()) {
|
||||
TEST(true); // Can't send to private address without an open connection
|
||||
// If there isn't an open connection, a public address, or the peer isn't compatible, we can't send
|
||||
if ((peer->outgoingConnectionIdle && !destination.address.isPublic()) || (!peer->compatible && destination.token != WLTOKEN_PING_PACKET)) {
|
||||
TEST(true); // Can't send to private address without a compatible open connection
|
||||
return (PacketID)NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,25 +27,36 @@
|
|||
|
||||
IAsyncFile::IAsyncFile(){};
|
||||
|
||||
ACTOR static Future<Void> incrementalDeleteHelper( std::string filename, int64_t truncateAmt, double interval ){
|
||||
ACTOR static Future<Void> incrementalDeleteHelper( std::string filename, bool mustBeDurable, int64_t truncateAmt, double interval ) {
|
||||
state Reference<IAsyncFile> file;
|
||||
state int64_t remainingFileSize;
|
||||
state bool exists = fileExists(filename);
|
||||
|
||||
state Reference<IAsyncFile> f = wait(
|
||||
IAsyncFileSystem::filesystem()->open(filename, IAsyncFile::OPEN_READWRITE, 0));
|
||||
state int64_t filesize = wait(f->size());
|
||||
state int64_t i = filesize;
|
||||
if(exists) {
|
||||
Reference<IAsyncFile> f = wait(IAsyncFileSystem::filesystem()->open(filename, IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_UNCACHED, 0));
|
||||
file = f;
|
||||
|
||||
Void _ = wait(IAsyncFileSystem::filesystem()->deleteFile(filename, true));
|
||||
for( ;i > 0; i -= truncateAmt ){
|
||||
Void _ = wait(f->truncate(i));
|
||||
Void _ = wait(f->sync());
|
||||
Void _ = wait(delay(interval));
|
||||
int64_t fileSize = wait(file->size());
|
||||
remainingFileSize = fileSize;
|
||||
}
|
||||
|
||||
Void _ = wait(IAsyncFileSystem::filesystem()->deleteFile(filename, mustBeDurable));
|
||||
|
||||
if(exists) {
|
||||
for( ; remainingFileSize > 0; remainingFileSize -= truncateAmt ){
|
||||
Void _ = wait(file->truncate(remainingFileSize));
|
||||
Void _ = wait(file->sync());
|
||||
Void _ = wait(delay(interval));
|
||||
}
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
Future<Void> IAsyncFile::incrementalDelete( std::string filename){
|
||||
Future<Void> IAsyncFile::incrementalDelete( std::string filename, bool mustBeDurable ) {
|
||||
return uncancellable(incrementalDeleteHelper(
|
||||
filename,
|
||||
mustBeDurable,
|
||||
FLOW_KNOBS->INCREMENTAL_DELETE_TRUNCATE_AMOUNT,
|
||||
FLOW_KNOBS->INCREMENTAL_DELETE_INTERVAL));
|
||||
}
|
||||
|
@ -63,6 +74,6 @@ TEST_CASE( "fileio/incrementalDelete" ) {
|
|||
Void _ = wait(f->truncate(fileSize));
|
||||
//close the file by deleting the reference
|
||||
f.clear();
|
||||
Void _ = wait(IAsyncFile::incrementalDelete(filename));
|
||||
Void _ = wait(IAsyncFile::incrementalDelete(filename, true));
|
||||
return Void();
|
||||
}
|
||||
|
|
|
@ -57,8 +57,10 @@ public:
|
|||
virtual Future<Void> flush() { return Void(); } // Sends previous writes to the OS if they have been buffered in memory, but does not make them power safe
|
||||
virtual Future<int64_t> size() = 0;
|
||||
virtual std::string getFilename() = 0;
|
||||
//start an actor to truncate the file repeatedly so that the operating system doesn't delete it all at once
|
||||
static Future<Void> incrementalDelete( std::string filename);
|
||||
|
||||
// Unlinks a file and then deletes it slowly by truncating the file repeatedly.
|
||||
// If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
|
||||
static Future<Void> incrementalDelete( std::string filename, bool mustBeDurable );
|
||||
|
||||
// Attempt to read the *length bytes at offset without copying. If successful, a pointer to the
|
||||
// requested bytes is written to *data, and the number of bytes successfully read is
|
||||
|
|
|
@ -204,6 +204,22 @@ Future< REPLY_TYPE(Request) > loadBalance(
|
|||
}
|
||||
}
|
||||
}
|
||||
if( nextMetric > 1e8 ) {
|
||||
for(int i=alternatives->countBest(); i<alternatives->size(); i++) {
|
||||
RequestStream<Request> const* thisStream = &alternatives->get( i, channel );
|
||||
if (!IFailureMonitor::failureMonitor().getState( thisStream->getEndpoint() ).failed) {
|
||||
auto& qd = model->getMeasurement(thisStream->getEndpoint().token.first());
|
||||
double thisMetric = qd.smoothOutstanding.smoothTotal();
|
||||
double thisTime = qd.latency;
|
||||
|
||||
if( thisMetric < nextMetric ) {
|
||||
nextAlt = i;
|
||||
nextMetric = thisMetric;
|
||||
nextTime = thisTime;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(nextTime < 1e9) {
|
||||
if(bestTime > FLOW_KNOBS->INSTANT_SECOND_REQUEST_MULTIPLIER*(model->secondMultiplier*(nextTime) + FLOW_KNOBS->BASE_SECOND_REQUEST_TIME)) {
|
||||
|
|
|
@ -89,15 +89,11 @@ void eraseDirectoryRecursive( std::string const& dir ) {
|
|||
INJECT_FAULT( platform_error, "eraseDirectoryRecursive" );
|
||||
#ifdef _WIN32
|
||||
system( ("rd /s /q \"" + dir + "\"").c_str() );
|
||||
#elif defined(__linux__)
|
||||
#elif defined(__linux__) || defined(__APPLE__)
|
||||
int error =
|
||||
nftw(dir.c_str(),
|
||||
[](const char *fpath, const struct stat *sb, int typeflag,
|
||||
struct FTW *ftwbuf) -> int {
|
||||
if (remove(fpath))
|
||||
return FTW_STOP;
|
||||
return FTW_CONTINUE;
|
||||
}, 64, FTW_DEPTH | FTW_PHYS | FTW_ACTIONRETVAL);
|
||||
[](const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftwbuf) -> int { return remove(fpath); }
|
||||
, 64, FTW_DEPTH | FTW_PHYS);
|
||||
/* Looks like calling code expects this to continue silently if
|
||||
the directory we're deleting doesn't exist in the first
|
||||
place */
|
||||
|
@ -105,14 +101,6 @@ void eraseDirectoryRecursive( std::string const& dir ) {
|
|||
TraceEvent(SevError, "nftw").detail("Directory", dir).GetLastError();
|
||||
throw platform_error();
|
||||
}
|
||||
#elif defined(__APPLE__)
|
||||
// const char* argv[2];
|
||||
// argv[0] = dir.c_str();
|
||||
// argv[1] = NULL;
|
||||
// FTS* fts = fts_open(argv, FTS_PHYSICAL | FTS_SEEDOT | FTS_NOSTAT, NULL);
|
||||
// while (FTSENT* ent = fts_read(fts)) {
|
||||
// if (ent->fts_info
|
||||
// }
|
||||
#else
|
||||
#error Port me!
|
||||
#endif
|
||||
|
|
|
@ -60,7 +60,7 @@ bool IReplicationPolicy::validateFull(
|
|||
}
|
||||
else if (validate(fromServers->getGroupEntries(), fromServers)) {
|
||||
if (g_replicationdebug > 2) {
|
||||
printf("Error: Validated unsolved policy with all%5lu servers\n", fromServers->size());
|
||||
printf("Error: Validated unsolved policy with all%5d servers\n", fromServers->size());
|
||||
}
|
||||
valid = false;
|
||||
}
|
||||
|
@ -194,7 +194,7 @@ bool PolicyAcross::validate(
|
|||
count ++;
|
||||
}
|
||||
else if (g_replicationdebug > 4) {
|
||||
printf("Across invalid solution:%5lu key: %-7s value: (%3lu) %-10s policy: %-10s => %s\n", itValid.second.size(), _attribKey.c_str(), itValid.first._id, fromServers->valueText(itValid.first).c_str(), _policy->name().c_str(), _policy->info().c_str());
|
||||
printf("Across invalid solution:%5lu key: %-7s value: (%3d) %-10s policy: %-10s => %s\n", itValid.second.size(), _attribKey.c_str(), itValid.first._id, fromServers->valueText(itValid.first).c_str(), _policy->name().c_str(), _policy->info().c_str());
|
||||
if (g_replicationdebug > 5) {
|
||||
for (auto& entry : itValid.second) {
|
||||
printf(" entry: %s\n", fromServers->getEntryInfo(entry).c_str());
|
||||
|
|
|
@ -79,7 +79,7 @@ bool testFuzzActor( Future<int>(*actor)(FutureStream<int> const&, PromiseStream<
|
|||
}
|
||||
}
|
||||
if (outCount+1 != expectedOutput.size()) {
|
||||
printf("\tERROR: %s output length incorrect: %d != expected %d\n", desc, outCount+1, expectedOutput.size());
|
||||
printf("\tERROR: %s output length incorrect: %d != expected %lu\n", desc, outCount+1, expectedOutput.size());
|
||||
if (trial) printf("\t\tResult was inconsistent between runs!\n");
|
||||
ok = false;
|
||||
//return false;
|
||||
|
|
|
@ -65,6 +65,29 @@ bool simulator_should_inject_fault( const char* context, const char* file, int l
|
|||
return false;
|
||||
}
|
||||
|
||||
void ISimulator::displayWorkers() const
|
||||
{
|
||||
std::map<std::string, std::vector<ISimulator::ProcessInfo*>> zoneMap;
|
||||
|
||||
// Create a map of zone Id
|
||||
for (auto processInfo : getAllProcesses()) {
|
||||
std::string dataHall = processInfo->locality.dataHallId().present() ? processInfo->locality.dataHallId().get().printable() : "[unset]";
|
||||
std::string zoneId = processInfo->locality.zoneId().present() ? processInfo->locality.zoneId().get().printable() : "[unset]";
|
||||
zoneMap[format("%-8s %s", dataHall.c_str(), zoneId.c_str())].push_back(processInfo);
|
||||
}
|
||||
|
||||
printf("DataHall ZoneId\n");
|
||||
printf(" Address Name Class Excluded Failed Rebooting Role DataFolder\n");
|
||||
for (auto& zoneRecord : zoneMap) {
|
||||
printf("\n%s\n", zoneRecord.first.c_str());
|
||||
for (auto& processInfo : zoneRecord.second) {
|
||||
printf(" %9s %-10s%-13s%-8s %-6s %-9s %-48s %-40s\n",
|
||||
processInfo->address.toString().c_str(), processInfo->name, processInfo->startingClass.toString().c_str(), (processInfo->excluded ? "True" : "False"), (processInfo->failed ? "True" : "False"), (processInfo->rebooting ? "True" : "False"), getRoles(processInfo->address).c_str(), processInfo->dataFolder);
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
namespace std {
|
||||
template<>
|
||||
|
@ -863,16 +886,29 @@ public:
|
|||
// This is a _rudimentary_ simulation of the untrustworthiness of non-durable deletes and the possibility of
|
||||
// rebooting during a durable one. It isn't perfect: for example, on real filesystems testing
|
||||
// for the existence of a non-durably deleted file BEFORE a reboot will show that it apparently doesn't exist.
|
||||
g_simulator.getCurrentProcess()->machine->openFiles.erase(filename);
|
||||
if(g_simulator.getCurrentProcess()->machine->openFiles.count(filename)) {
|
||||
g_simulator.getCurrentProcess()->machine->openFiles.erase(filename);
|
||||
g_simulator.getCurrentProcess()->machine->deletingFiles.insert(filename);
|
||||
}
|
||||
if ( mustBeDurable || g_random->random01() < 0.5 ) {
|
||||
Void _ = wait( ::delay(0.05 * g_random->random01()) );
|
||||
if (!self->getCurrentProcess()->rebooting) {
|
||||
auto f = IAsyncFileSystem::filesystem(self->net2)->deleteFile(filename, false);
|
||||
ASSERT( f.isReady() );
|
||||
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
|
||||
state int currentTaskID = g_network->getCurrentTask();
|
||||
Void _ = wait( g_simulator.onMachine( currentProcess ) );
|
||||
try {
|
||||
Void _ = wait( ::delay(0.05 * g_random->random01()) );
|
||||
TEST( true ); // Simulated durable delete
|
||||
if (!currentProcess->rebooting) {
|
||||
auto f = IAsyncFileSystem::filesystem(self->net2)->deleteFile(filename, false);
|
||||
ASSERT( f.isReady() );
|
||||
Void _ = wait( ::delay(0.05 * g_random->random01()) );
|
||||
TEST( true ); // Simulated durable delete
|
||||
}
|
||||
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
|
||||
return Void();
|
||||
} catch( Error &e ) {
|
||||
state Error err = e;
|
||||
Void _ = wait( g_simulator.onProcess( currentProcess, currentTaskID ) );
|
||||
throw err;
|
||||
}
|
||||
return Void();
|
||||
} else {
|
||||
TEST( true ); // Simulated non-durable delete
|
||||
return Void();
|
||||
|
@ -965,8 +1001,11 @@ public:
|
|||
for (auto processInfo : getAllProcesses()) {
|
||||
// Add non-test processes (ie. datahall is not be set for test processes)
|
||||
if (processInfo->isAvailableClass()) {
|
||||
// Ignore excluded machines
|
||||
if (processInfo->excluded)
|
||||
;
|
||||
// Mark all of the unavailable as dead
|
||||
if (!processInfo->isAvailable())
|
||||
else if (!processInfo->isAvailable())
|
||||
processesDead.push_back(processInfo);
|
||||
else if (protectedAddresses.count(processInfo->address))
|
||||
processesLeft.push_back(processInfo);
|
||||
|
@ -1020,22 +1059,22 @@ public:
|
|||
}
|
||||
// Reboot and Delete if remaining machines do NOT fulfill policies
|
||||
else if ((kt != RebootAndDelete) && (kt != RebootProcessAndDelete) && (!processesLeft.validate(tLogPolicy))) {
|
||||
auto newKt = (g_random->random01() < 0.33) ? RebootAndDelete : Reboot;
|
||||
newKt = (g_random->random01() < 0.33) ? RebootAndDelete : Reboot;
|
||||
canSurvive = false;
|
||||
TraceEvent("KillChanged").detail("KillType", kt).detail("NewKillType", newKt).detail("tLogPolicy", tLogPolicy->info()).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("RemainingZones", ::describeZones(localitiesLeft)).detail("RemainingDataHalls", ::describeDataHalls(localitiesLeft)).detail("Reason", "tLogPolicy does not validates against remaining processes.");
|
||||
}
|
||||
else if ((kt != RebootAndDelete) && (kt != RebootProcessAndDelete) && (!processesLeft.validate(storagePolicy))) {
|
||||
auto newKt = (g_random->random01() < 0.33) ? RebootAndDelete : Reboot;
|
||||
newKt = (g_random->random01() < 0.33) ? RebootAndDelete : Reboot;
|
||||
canSurvive = false;
|
||||
TraceEvent("KillChanged").detail("KillType", kt).detail("NewKillType", newKt).detail("storagePolicy", storagePolicy->info()).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("RemainingZones", ::describeZones(localitiesLeft)).detail("RemainingDataHalls", ::describeDataHalls(localitiesLeft)).detail("Reason", "storagePolicy does not validates against remaining processes.");
|
||||
}
|
||||
else if ((kt != RebootAndDelete) && (kt != RebootProcessAndDelete) && (nQuorum > uniqueMachines.size())) {
|
||||
auto newKt = (g_random->random01() < 0.33) ? RebootAndDelete : Reboot;
|
||||
newKt = (g_random->random01() < 0.33) ? RebootAndDelete : Reboot;
|
||||
canSurvive = false;
|
||||
TraceEvent("KillChanged").detail("KillType", kt).detail("NewKillType", newKt).detail("storagePolicy", storagePolicy->info()).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("RemainingZones", ::describeZones(localitiesLeft)).detail("RemainingDataHalls", ::describeDataHalls(localitiesLeft)).detail("Quorum", nQuorum).detail("Machines", uniqueMachines.size()).detail("Reason", "Not enough unique machines to perform auto configuration of coordinators.");
|
||||
}
|
||||
else {
|
||||
TraceEvent("CanSurviveKills").detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("DeadZones", ::describeZones(localitiesDead)).detail("DeadDataHalls", ::describeDataHalls(localitiesDead)).detail("tLogPolicy", tLogPolicy->info()).detail("storagePolicy", storagePolicy->info()).detail("Quorum", nQuorum).detail("Machines", uniqueMachines.size()).detail("ZonesLeft", ::describeZones(localitiesLeft)).detail("ValidateRemaining", processesLeft.validate(tLogPolicy));
|
||||
TraceEvent("CanSurviveKills").detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("DeadZones", ::describeZones(localitiesDead)).detail("DeadDataHalls", ::describeDataHalls(localitiesDead)).detail("tLogPolicy", tLogPolicy->info()).detail("storagePolicy", storagePolicy->info()).detail("Quorum", nQuorum).detail("Machines", uniqueMachines.size()).detail("ZonesLeft", ::describeZones(localitiesLeft)).detail("DataHallsLeft", ::describeDataHalls(localitiesLeft)).detail("ValidateRemaining", processesLeft.validate(tLogPolicy));
|
||||
}
|
||||
}
|
||||
if (newKillType) *newKillType = newKt;
|
||||
|
@ -1059,12 +1098,12 @@ public:
|
|||
TEST( kt == InjectFaults ); // Simulated machine was killed with faults
|
||||
|
||||
if (kt == KillInstantly) {
|
||||
TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", describe(*machine)).detail("Rebooting", machine->rebooting).backtrace();
|
||||
TraceEvent(SevWarn, "FailMachine", machine->locality.zoneId()).detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", describe(*machine)).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
|
||||
// This will remove all the "tracked" messages that came from the machine being killed
|
||||
latestEventCache.clear();
|
||||
machine->failed = true;
|
||||
} else if (kt == InjectFaults) {
|
||||
TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", describe(*machine)).detail("Rebooting", machine->rebooting).backtrace();
|
||||
TraceEvent(SevWarn, "FaultMachine", machine->locality.zoneId()).detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", describe(*machine)).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
|
||||
should_inject_fault = simulator_should_inject_fault;
|
||||
machine->fault_injection_r = g_random->randomUniqueID().first();
|
||||
machine->fault_injection_p1 = 0.1;
|
||||
|
@ -1075,8 +1114,10 @@ public:
|
|||
ASSERT(!protectedAddresses.count(machine->address) || machine->rebooting);
|
||||
}
|
||||
virtual void rebootProcess( ProcessInfo* process, KillType kt ) {
|
||||
if( kt == RebootProcessAndDelete && protectedAddresses.count(process->address) )
|
||||
if( kt == RebootProcessAndDelete && protectedAddresses.count(process->address) ) {
|
||||
TraceEvent("RebootChanged").detail("ZoneId", process->locality.describeZone()).detail("KillType", RebootProcess).detail("OrigKillType", kt).detail("Reason", "Protected process");
|
||||
kt = RebootProcess;
|
||||
}
|
||||
doReboot( process, kt );
|
||||
}
|
||||
virtual void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses ) {
|
||||
|
@ -1121,6 +1162,7 @@ public:
|
|||
TEST(kt == InjectFaults); // Trying to kill by injecting faults
|
||||
|
||||
if(speedUpSimulation && !forceKill) {
|
||||
TraceEvent(SevWarn, "AbortedKill", zoneId).detailext("ZoneId", zoneId).detail("Reason", "Unforced kill within speedy simulation.").backtrace();
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1145,15 +1187,25 @@ public:
|
|||
if ((kt == KillInstantly) || (kt == InjectFaults) || (kt == RebootAndDelete) || (kt == RebootProcessAndDelete))
|
||||
{
|
||||
std::vector<ProcessInfo*> processesLeft, processesDead;
|
||||
int protectedWorker = 0, unavailable = 0, excluded = 0;
|
||||
|
||||
for (auto machineRec : machines) {
|
||||
for (auto processInfo : machineRec.second.processes) {
|
||||
// Add non-test processes (ie. datahall is not be set for test processes)
|
||||
if (processInfo->isAvailableClass()) {
|
||||
if (!processInfo->isAvailable())
|
||||
// Do not include any excluded machines
|
||||
if (processInfo->excluded) {
|
||||
processesDead.push_back(processInfo);
|
||||
else if (protectedAddresses.count(processInfo->address))
|
||||
excluded ++;
|
||||
}
|
||||
else if (!processInfo->isAvailable()) {
|
||||
processesDead.push_back(processInfo);
|
||||
unavailable ++;
|
||||
}
|
||||
else if (protectedAddresses.count(processInfo->address)) {
|
||||
processesLeft.push_back(processInfo);
|
||||
protectedWorker ++;
|
||||
}
|
||||
else if (machineRec.second.zoneId != zoneId)
|
||||
processesLeft.push_back(processInfo);
|
||||
// Add processes from dead machines and datacenter machines to dead group
|
||||
|
@ -1166,7 +1218,7 @@ public:
|
|||
if ((kt != Reboot) && (!killIsSafe)) {
|
||||
kt = Reboot;
|
||||
}
|
||||
TraceEvent("ChangedKillMachine", zoneId).detailext("ZoneId", zoneId).detail("KillType", kt).detail("OrigKillType", ktOrig).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("processesPerMachine", processesPerMachine).detail("tLogPolicy", tLogPolicy->info()).detail("storagePolicy", storagePolicy->info());
|
||||
TraceEvent("ChangedKillMachine", zoneId).detailext("ZoneId", zoneId).detail("KillType", kt).detail("OrigKillType", ktOrig).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("processesPerMachine", processesPerMachine).detail("Protected", protectedWorker).detail("Unavailable", unavailable).detail("Excluded", excluded).detail("ProtectedTotal", protectedAddresses.size()).detail("tLogPolicy", tLogPolicy->info()).detail("storagePolicy", storagePolicy->info());
|
||||
}
|
||||
else if ((kt == KillInstantly) || (kt == InjectFaults)) {
|
||||
TraceEvent("DeadMachine", zoneId).detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("processesPerMachine", processesPerMachine).detail("tLogPolicy", tLogPolicy->info()).detail("storagePolicy", storagePolicy->info());
|
||||
|
@ -1193,31 +1245,30 @@ public:
|
|||
// Check if any processes on machine are rebooting
|
||||
if( processesOnMachine != processesPerMachine && kt >= RebootAndDelete ) {
|
||||
TEST(true); //Attempted reboot, but the target did not have all of its processes running
|
||||
TraceEvent(SevWarn, "AbortedReboot", zoneId).detailext("ZoneId", zoneId).detail("Reason", "The target did not have all of its processes running.").detail("processes", processesOnMachine).detail("processesPerMachine", processesPerMachine).backtrace();
|
||||
TraceEvent(SevWarn, "AbortedKill", zoneId).detail("KillType", kt).detailext("ZoneId", zoneId).detail("Reason", "Machine processes does not match number of processes per machine").detail("processes", processesOnMachine).detail("processesPerMachine", processesPerMachine).backtrace();
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if any processes on machine are rebooting
|
||||
if ( processesOnMachine != processesPerMachine) {
|
||||
TEST(true); //Attempted reboot, but the target did not have all of its processes running
|
||||
TraceEvent(SevWarn, "AbortedKill", zoneId).detailext("ZoneId", zoneId).detail("Reason", "The target did not have all of its processes running.").detail("processes", processesOnMachine).detail("processesPerMachine", processesPerMachine).backtrace();
|
||||
TraceEvent(SevWarn, "AbortedKill", zoneId).detail("KillType", kt).detailext("ZoneId", zoneId).detail("Reason", "Machine processes does not match number of processes per machine").detail("processes", processesOnMachine).detail("processesPerMachine", processesPerMachine).backtrace();
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
TraceEvent("KillMachine", zoneId).detailext("ZoneId", zoneId).detail("Kt", kt).detail("KtOrig", ktOrig).detail("KilledMachines", killedMachines).detail("KillableMachines", processesOnMachine).detail("ProcessPerMachine", processesPerMachine).detail("KillChanged", kt!=ktOrig).detail("killIsSafe", killIsSafe);
|
||||
if (kt < RebootAndDelete ) {
|
||||
if(kt == InjectFaults && machines[zoneId].machineProcess != nullptr)
|
||||
killProcess_internal( machines[zoneId].machineProcess, kt );
|
||||
for (auto& process : machines[zoneId].processes) {
|
||||
TraceEvent("KillMachineProcess", zoneId).detail("KillType", kt).detail("Process", process->toString()).detail("startingClass", process->startingClass.toString());
|
||||
TraceEvent("KillMachineProcess", zoneId).detail("KillType", kt).detail("Process", process->toString()).detail("startingClass", process->startingClass.toString()).detail("failed", process->failed).detail("excluded", process->excluded).detail("rebooting", process->rebooting);
|
||||
if (process->startingClass != ProcessClass::TesterClass)
|
||||
killProcess_internal( process, kt );
|
||||
}
|
||||
}
|
||||
else if ( kt == Reboot || killIsSafe) {
|
||||
for (auto& process : machines[zoneId].processes) {
|
||||
TraceEvent("KillMachineProcess", zoneId).detail("KillType", kt).detail("Process", process->toString()).detail("startingClass", process->startingClass.toString());
|
||||
TraceEvent("KillMachineProcess", zoneId).detail("KillType", kt).detail("Process", process->toString()).detail("startingClass", process->startingClass.toString()).detail("failed", process->failed).detail("excluded", process->excluded).detail("rebooting", process->rebooting);
|
||||
if (process->startingClass != ProcessClass::TesterClass)
|
||||
doReboot(process, kt );
|
||||
}
|
||||
|
@ -1233,13 +1284,16 @@ public:
|
|||
int dcProcesses = 0;
|
||||
|
||||
// Switch to a reboot, if anything protected on machine
|
||||
for (auto& process : processes) {
|
||||
auto processDcId = process->locality.dcId();
|
||||
auto processZoneId = process->locality.zoneId();
|
||||
for (auto& procRecord : processes) {
|
||||
auto processDcId = procRecord->locality.dcId();
|
||||
auto processZoneId = procRecord->locality.zoneId();
|
||||
ASSERT(processZoneId.present());
|
||||
if (processDcId.present() && (processDcId == dcId)) {
|
||||
if (protectedAddresses.count(process->address))
|
||||
if ((kt != Reboot) && (protectedAddresses.count(procRecord->address))) {
|
||||
kt = Reboot;
|
||||
TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig)
|
||||
.detail("Reason", "Datacenter has protected process").detail("ProcessAddress", procRecord->address).detail("failed", procRecord->failed).detail("rebooting", procRecord->rebooting).detail("excluded", procRecord->excluded).detail("Process", describe(*procRecord));
|
||||
}
|
||||
datacenterZones[processZoneId.get()] ++;
|
||||
dcProcesses ++;
|
||||
}
|
||||
|
@ -1254,7 +1308,9 @@ public:
|
|||
// Add non-test processes (ie. datahall is not be set for test processes)
|
||||
if (processInfo->isAvailableClass()) {
|
||||
// Mark all of the unavailable as dead
|
||||
if (!processInfo->isAvailable())
|
||||
if (processInfo->excluded)
|
||||
processesDead.push_back(processInfo);
|
||||
else if (!processInfo->isAvailable())
|
||||
processesDead.push_back(processInfo);
|
||||
else if (protectedAddresses.count(processInfo->address))
|
||||
processesLeft.push_back(processInfo);
|
||||
|
@ -1268,7 +1324,7 @@ public:
|
|||
}
|
||||
|
||||
if (!canKillProcesses(processesLeft, processesDead, kt, &kt)) {
|
||||
TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", ktOrig).detail("NewKillType", kt);
|
||||
TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig);
|
||||
}
|
||||
else {
|
||||
TraceEvent("DeadDataCenter").detailext("DataCenter", dcId).detail("KillType", kt).detail("DcZones", datacenterZones.size()).detail("DcProcesses", dcProcesses).detail("ProcessesDead", processesDead.size()).detail("ProcessesLeft", processesLeft.size()).detail("tLogPolicy", storagePolicy->info()).detail("storagePolicy", storagePolicy->info());
|
||||
|
@ -1283,10 +1339,13 @@ public:
|
|||
.detail("DcZones", datacenterZones.size())
|
||||
.detail("DcProcesses", dcProcesses)
|
||||
.detailext("DCID", dcId)
|
||||
.detail("KillType", kt);
|
||||
.detail("KillType", kt)
|
||||
.detail("OrigKillType", ktOrig);
|
||||
|
||||
for (auto& datacenterZone : datacenterZones)
|
||||
killMachine( datacenterZone.first, kt, (kt == RebootAndDelete), true);
|
||||
killMachine( datacenterZone.first, kt, (kt == RebootAndDelete), true);
|
||||
// ahm If above doesn't work, go conservative
|
||||
// killMachine( datacenterZone.first, kt, false, true);
|
||||
}
|
||||
virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) {
|
||||
if (mode == ClogDefault) {
|
||||
|
@ -1464,6 +1523,9 @@ static double networkLatency() {
|
|||
}
|
||||
|
||||
ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) {
|
||||
TraceEvent("RebootingProcessAttempt").detailext("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", p->toString()).detail("startingClass", p->startingClass.toString()).detail("failed", p->failed).detail("excluded", p->excluded).detail("rebooting", p->rebooting).detail("TaskDefaultDelay", TaskDefaultDelay);
|
||||
// ASSERT(p->failed); //ahm
|
||||
|
||||
Void _ = wait( g_sim2.delay( 0, TaskDefaultDelay, p ) ); // Switch to the machine in question
|
||||
|
||||
try {
|
||||
|
@ -1476,7 +1538,7 @@ ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) {
|
|||
|
||||
if( p->rebooting )
|
||||
return;
|
||||
TraceEvent("RebootingMachine").detail("KillType", kt).detail("Address", p->address).detailext("ZoneId", p->locality.zoneId()).detailext("DataHall", p->locality.dataHallId()).detail("Locality", p->locality.toString());
|
||||
TraceEvent("RebootingProcess").detail("KillType", kt).detail("Address", p->address).detailext("ZoneId", p->locality.zoneId()).detailext("DataHall", p->locality.dataHallId()).detail("Locality", p->locality.toString()).detail("failed", p->failed).detail("excluded", p->excluded).backtrace();
|
||||
p->rebooting = true;
|
||||
p->shutdownSignal.send( kt );
|
||||
} catch (Error& e) {
|
||||
|
|
|
@ -109,6 +109,7 @@ public:
|
|||
ProcessInfo* machineProcess;
|
||||
std::vector<ProcessInfo*> processes;
|
||||
std::map<std::string, Future<Reference<IAsyncFile>>> openFiles;
|
||||
std::set<std::string> deletingFiles;
|
||||
std::set<std::string> closingFiles;
|
||||
Optional<Standalone<StringRef>> zoneId;
|
||||
|
||||
|
@ -149,18 +150,83 @@ public:
|
|||
//virtual KillType getMachineKillState( UID zoneID ) = 0;
|
||||
virtual bool canKillProcesses(std::vector<ProcessInfo*> const& availableProcesses, std::vector<ProcessInfo*> const& deadProcesses, KillType kt, KillType* newKillType) const = 0;
|
||||
virtual bool isAvailable() const = 0;
|
||||
virtual void displayWorkers() const;
|
||||
|
||||
virtual void addRole(NetworkAddress const& address, std::string const& role) {
|
||||
roleAddresses[address][role] ++;
|
||||
TraceEvent("RoleAdd").detail("Address", address).detail("Role", role).detail("Roles", roleAddresses[address].size()).detail("Value", roleAddresses[address][role]);
|
||||
}
|
||||
|
||||
virtual void removeRole(NetworkAddress const& address, std::string const& role) {
|
||||
auto addressIt = roleAddresses.find(address);
|
||||
if (addressIt != roleAddresses.end()) {
|
||||
auto rolesIt = addressIt->second.find(role);
|
||||
if (rolesIt != addressIt->second.end()) {
|
||||
if (rolesIt->second > 1) {
|
||||
rolesIt->second --;
|
||||
TraceEvent("RoleRemove").detail("Address", address).detail("Role", role).detail("Roles", addressIt->second.size()).detail("Value", rolesIt->second).detail("Result", "Decremented Role");
|
||||
}
|
||||
else {
|
||||
addressIt->second.erase(rolesIt);
|
||||
if (addressIt->second.size()) {
|
||||
TraceEvent("RoleRemove").detail("Address", address).detail("Role", role).detail("Roles", addressIt->second.size()).detail("Value", 0).detail("Result", "Removed Role");
|
||||
}
|
||||
else {
|
||||
roleAddresses.erase(addressIt);
|
||||
TraceEvent("RoleRemove").detail("Address", address).detail("Role", role).detail("Roles", 0).detail("Value", 0).detail("Result", "Removed Address");
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
TraceEvent(SevWarn,"RoleRemove").detail("Address", address).detail("Role", role).detail("Result", "Role Missing");
|
||||
}
|
||||
}
|
||||
else {
|
||||
TraceEvent(SevWarn,"RoleRemove").detail("Address", address).detail("Role", role).detail("Result", "Address Missing");
|
||||
}
|
||||
}
|
||||
|
||||
virtual std::string getRoles(NetworkAddress const& address, bool skipWorkers = true) const {
|
||||
auto addressIt = roleAddresses.find(address);
|
||||
std::string roleText;
|
||||
if (addressIt != roleAddresses.end()) {
|
||||
for (auto& roleIt : addressIt->second) {
|
||||
if ((!skipWorkers) || (roleIt.first != "Worker"))
|
||||
roleText += roleIt.first + ((roleIt.second > 1) ? format("-%d ", roleIt.second) : " ");
|
||||
}
|
||||
}
|
||||
if (roleText.empty())
|
||||
roleText = "[unset]";
|
||||
return roleText;
|
||||
}
|
||||
|
||||
virtual void excludeAddress(NetworkAddress const& address) {
|
||||
excludedAddresses.insert(address);
|
||||
excludedAddresses[address]++;
|
||||
TraceEvent("ExcludeAddress").detail("Address", address).detail("Value", excludedAddresses[address]);
|
||||
}
|
||||
|
||||
virtual void includeAddress(NetworkAddress const& address) {
|
||||
excludedAddresses.erase(address);
|
||||
auto addressIt = excludedAddresses.find(address);
|
||||
if (addressIt != excludedAddresses.end()) {
|
||||
if (addressIt->second > 1) {
|
||||
addressIt->second --;
|
||||
TraceEvent("IncludeAddress").detail("Address", address).detail("Value", addressIt->second).detail("Result", "Decremented");
|
||||
}
|
||||
else {
|
||||
excludedAddresses.erase(addressIt);
|
||||
TraceEvent("IncludeAddress").detail("Address", address).detail("Value", 0).detail("Result", "Removed");
|
||||
}
|
||||
}
|
||||
else {
|
||||
TraceEvent(SevWarn,"IncludeAddress").detail("Address", address).detail("Result", "Missing");
|
||||
}
|
||||
}
|
||||
virtual void includeAllAddresses() {
|
||||
TraceEvent("IncludeAddressAll").detail("AddressTotal", excludedAddresses.size());
|
||||
excludedAddresses.clear();
|
||||
}
|
||||
virtual bool isExcluded(NetworkAddress const& address) const {
|
||||
return excludedAddresses.count(address) == 0;
|
||||
return excludedAddresses.find(address) != excludedAddresses.end();
|
||||
}
|
||||
|
||||
virtual void disableSwapToMachine(Optional<Standalone<StringRef>> zoneId ) {
|
||||
|
@ -229,7 +295,8 @@ protected:
|
|||
|
||||
private:
|
||||
std::set<Optional<Standalone<StringRef>>> swapsDisabled;
|
||||
std::set<NetworkAddress> excludedAddresses;
|
||||
std::map<NetworkAddress, int> excludedAddresses;
|
||||
std::map<NetworkAddress, std::map<std::string, int>> roleAddresses;
|
||||
bool allSwapsDisabled;
|
||||
};
|
||||
|
||||
|
|
|
@ -25,10 +25,10 @@
|
|||
#include "fdbclient/MutationList.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "fdbclient/BackupAgent.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "IKeyValueStore.h"
|
||||
#include "LogSystem.h"
|
||||
#include "LogProtocolMessage.h"
|
||||
#include "flow/Notified.h"
|
||||
|
||||
static bool isMetadataMutation(MutationRef const& m) {
|
||||
// FIXME: This is conservative - not everything in system keyspace is necessarily processed by applyMetadataMutations
|
||||
|
|
|
@ -152,13 +152,13 @@ public:
|
|||
|
||||
std::pair<WorkerInterface, ProcessClass> getStorageWorker( RecruitStorageRequest const& req ) {
|
||||
std::set<Optional<Standalone<StringRef>>> excludedMachines( req.excludeMachines.begin(), req.excludeMachines.end() );
|
||||
std::set<Optional<Standalone<StringRef>>> includeDCs( req.includeDCs.begin(), req.includeDCs.end() );
|
||||
std::set<Optional<Standalone<StringRef>>> excludedDCs( req.excludeDCs.begin(), req.excludeDCs.end() );
|
||||
std::set<AddressExclusion> excludedAddresses( req.excludeAddresses.begin(), req.excludeAddresses.end() );
|
||||
|
||||
for( auto& it : id_worker )
|
||||
if( workerAvailable( it.second, false ) &&
|
||||
!excludedMachines.count(it.second.interf.locality.zoneId()) &&
|
||||
( includeDCs.size() == 0 || includeDCs.count(it.second.interf.locality.dcId()) ) &&
|
||||
!excludedDCs.count(it.second.interf.locality.dcId()) &&
|
||||
!addressExcluded(excludedAddresses, it.second.interf.address()) &&
|
||||
it.second.processClass.machineClassFitness( ProcessClass::Storage ) <= ProcessClass::UnsetFit ) {
|
||||
return std::make_pair(it.second.interf, it.second.processClass);
|
||||
|
@ -171,7 +171,7 @@ public:
|
|||
ProcessClass::Fitness fit = it.second.processClass.machineClassFitness( ProcessClass::Storage );
|
||||
if( workerAvailable( it.second, false ) &&
|
||||
!excludedMachines.count(it.second.interf.locality.zoneId()) &&
|
||||
( includeDCs.size() == 0 || includeDCs.count(it.second.interf.locality.dcId()) ) &&
|
||||
!excludedDCs.count(it.second.interf.locality.dcId()) &&
|
||||
!addressExcluded(excludedAddresses, it.second.interf.address()) &&
|
||||
fit < bestFit ) {
|
||||
bestFit = fit;
|
||||
|
@ -211,26 +211,27 @@ public:
|
|||
throw no_more_servers();
|
||||
}
|
||||
|
||||
std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( DatabaseConfiguration const& conf, std::map< Optional<Standalone<StringRef>>, int>& id_used, bool checkStable = false, std::set<Optional<Key>> dcIds = std::set<Optional<Key>>(), std::set<NetworkAddress> additionalExlusions = std::set<NetworkAddress>() )
|
||||
std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogsAcrossDatacenters( DatabaseConfiguration const& conf, std::map< Optional<Standalone<StringRef>>, int>& id_used, bool checkStable = false )
|
||||
{
|
||||
std::map<ProcessClass::Fitness, vector<std::pair<WorkerInterface, ProcessClass>>> fitness_workers;
|
||||
std::vector<std::pair<WorkerInterface, ProcessClass>> results;
|
||||
std::vector<LocalityData> unavailableLocals;
|
||||
LocalitySetRef logServerSet;
|
||||
LocalityMap<std::pair<WorkerInterface, ProcessClass>>* logServerMap;
|
||||
bool bCompleted = false;
|
||||
std::vector<std::pair<WorkerInterface, ProcessClass>> results;
|
||||
std::vector<LocalityData> unavailableLocals;
|
||||
LocalitySetRef logServerSet;
|
||||
LocalityMap<std::pair<WorkerInterface, ProcessClass>>* logServerMap;
|
||||
UID functionId = g_nondeterministic_random->randomUniqueID();
|
||||
bool bCompleted = false;
|
||||
|
||||
logServerSet = Reference<LocalitySet>(new LocalityMap<std::pair<WorkerInterface, ProcessClass>>());
|
||||
logServerMap = (LocalityMap<std::pair<WorkerInterface, ProcessClass>>*) logServerSet.getPtr();
|
||||
|
||||
for( auto& it : id_worker ) {
|
||||
auto fitness = it.second.processClass.machineClassFitness( ProcessClass::TLog );
|
||||
if( workerAvailable(it.second, checkStable) && !conf.isExcludedServer(it.second.interf.address()) && !additionalExlusions.count(it.second.interf.address()) && fitness != ProcessClass::NeverAssign && (!dcIds.size() || dcIds.count(it.second.interf.locality.dcId())) ) {
|
||||
if( workerAvailable(it.second, checkStable) && !conf.isExcludedServer(it.second.interf.address()) && fitness != ProcessClass::NeverAssign ) {
|
||||
fitness_workers[ fitness ].push_back(std::make_pair(it.second.interf, it.second.processClass));
|
||||
}
|
||||
else {
|
||||
if (it.second.interf.locality.dataHallId().present())
|
||||
TraceEvent(SevWarn,"GWFTADNotAvailable", id)
|
||||
TraceEvent(SevWarn,"GWFTADNotAvailable", functionId)
|
||||
.detail("Fitness", fitness)
|
||||
.detailext("Zone", it.second.interf.locality.zoneId())
|
||||
.detailext("DataHall", it.second.interf.locality.dataHallId())
|
||||
|
@ -243,7 +244,8 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
.detail("Locality", it.second.interf.locality.toString())
|
||||
.detail("tLogReplicationFactor", conf.tLogReplicationFactor)
|
||||
.detail("tLogPolicy", conf.tLogPolicy ? conf.tLogPolicy->info() : "[unset]")
|
||||
.detail("DesiredLogs", conf.getDesiredLogs());
|
||||
.detail("DesiredLogs", conf.getDesiredLogs())
|
||||
.detail("InterfaceId", id);
|
||||
unavailableLocals.push_back(it.second.interf.locality);
|
||||
}
|
||||
}
|
||||
|
@ -258,16 +260,17 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
logServerMap->add(worker.first.locality, &worker);
|
||||
}
|
||||
if (logServerSet->size() < conf.tLogReplicationFactor) {
|
||||
TraceEvent(SevWarn,"GWFTADTooFew", id)
|
||||
TraceEvent(SevWarn,"GWFTADTooFew", functionId)
|
||||
.detail("Fitness", fitness)
|
||||
.detail("Processes", logServerSet->size())
|
||||
.detail("tLogReplicationFactor", conf.tLogReplicationFactor)
|
||||
.detail("tLogPolicy", conf.tLogPolicy ? conf.tLogPolicy->info() : "[unset]")
|
||||
.detail("DesiredLogs", conf.getDesiredLogs());
|
||||
.detail("DesiredLogs", conf.getDesiredLogs())
|
||||
.detail("InterfaceId", id);
|
||||
}
|
||||
else if (logServerSet->size() <= conf.getDesiredLogs()) {
|
||||
ASSERT(conf.tLogPolicy);
|
||||
if (logServerSet->validate(conf.tLogPolicy)) {
|
||||
if (logServerSet->validate(conf.tLogPolicy)) {
|
||||
for (auto& object : logServerMap->getObjects()) {
|
||||
results.push_back(*object);
|
||||
}
|
||||
|
@ -275,18 +278,19 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
break;
|
||||
}
|
||||
else {
|
||||
TraceEvent(SevWarn,"GWFTADNotAcceptable", id)
|
||||
TraceEvent(SevWarn,"GWFTADNotAcceptable", functionId)
|
||||
.detail("Fitness", fitness)
|
||||
.detail("Processes", logServerSet->size())
|
||||
.detail("tLogReplicationFactor", conf.tLogReplicationFactor)
|
||||
.detail("tLogPolicy", conf.tLogPolicy ? conf.tLogPolicy->info() : "[unset]")
|
||||
.detail("DesiredLogs", conf.getDesiredLogs());
|
||||
.detail("DesiredLogs", conf.getDesiredLogs())
|
||||
.detail("InterfaceId", id);
|
||||
}
|
||||
}
|
||||
// Try to select the desired size, if larger
|
||||
else {
|
||||
std::vector<LocalityEntry> bestSet;
|
||||
std::vector<LocalityData> tLocalities;
|
||||
std::vector<LocalityEntry> bestSet;
|
||||
std::vector<LocalityData> tLocalities;
|
||||
ASSERT(conf.tLogPolicy);
|
||||
|
||||
// Try to find the best team of servers to fulfill the policy
|
||||
|
@ -300,7 +304,7 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
results.push_back(*object);
|
||||
tLocalities.push_back(object->first.locality);
|
||||
}
|
||||
TraceEvent("GWFTADBestResults", id)
|
||||
TraceEvent("GWFTADBestResults", functionId)
|
||||
.detail("Fitness", fitness)
|
||||
.detail("Processes", logServerSet->size())
|
||||
.detail("BestCount", bestSet.size())
|
||||
|
@ -308,17 +312,19 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
.detail("BestDataHalls", ::describeDataHalls(tLocalities))
|
||||
.detail("tLogPolicy", conf.tLogPolicy ? conf.tLogPolicy->info() : "[unset]")
|
||||
.detail("TotalResults", results.size())
|
||||
.detail("DesiredLogs", conf.getDesiredLogs());
|
||||
.detail("DesiredLogs", conf.getDesiredLogs())
|
||||
.detail("InterfaceId", id);
|
||||
bCompleted = true;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
TraceEvent(SevWarn,"GWFTADNoBest", id)
|
||||
TraceEvent(SevWarn,"GWFTADNoBest", functionId)
|
||||
.detail("Fitness", fitness)
|
||||
.detail("Processes", logServerSet->size())
|
||||
.detail("tLogReplicationFactor", conf.tLogReplicationFactor)
|
||||
.detail("tLogPolicy", conf.tLogPolicy ? conf.tLogPolicy->info() : "[unset]")
|
||||
.detail("DesiredLogs", conf.getDesiredLogs());
|
||||
.detail("DesiredLogs", conf.getDesiredLogs())
|
||||
.detail("InterfaceId", id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -331,7 +337,7 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
tLocalities.push_back(object->first.locality);
|
||||
}
|
||||
|
||||
TraceEvent(SevWarn, "GetTLogTeamFailed")
|
||||
TraceEvent(SevWarn, "GetTLogTeamFailed", functionId)
|
||||
.detail("Policy", conf.tLogPolicy->info())
|
||||
.detail("Processes", logServerSet->size())
|
||||
.detail("Workers", id_worker.size())
|
||||
|
@ -344,7 +350,8 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
.detail("DesiredLogs", conf.getDesiredLogs())
|
||||
.detail("RatingTests",SERVER_KNOBS->POLICY_RATING_TESTS)
|
||||
.detail("checkStable", checkStable)
|
||||
.detail("PolicyGenerations",SERVER_KNOBS->POLICY_GENERATIONS).backtrace();
|
||||
.detail("PolicyGenerations",SERVER_KNOBS->POLICY_GENERATIONS)
|
||||
.detail("InterfaceId", id).backtrace();
|
||||
|
||||
// Free the set
|
||||
logServerSet->clear();
|
||||
|
@ -356,14 +363,25 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
id_used[result.first.locality.processId()]++;
|
||||
}
|
||||
|
||||
TraceEvent("GetTLogTeamDone")
|
||||
TraceEvent("GetTLogTeamDone", functionId)
|
||||
.detail("Completed", bCompleted).detail("Policy", conf.tLogPolicy->info())
|
||||
.detail("Results", results.size()).detail("Processes", logServerSet->size())
|
||||
.detail("Workers", id_worker.size())
|
||||
.detail("Replication", conf.tLogReplicationFactor)
|
||||
.detail("Desired", conf.getDesiredLogs())
|
||||
.detail("RatingTests",SERVER_KNOBS->POLICY_RATING_TESTS)
|
||||
.detail("PolicyGenerations",SERVER_KNOBS->POLICY_GENERATIONS);
|
||||
.detail("PolicyGenerations",SERVER_KNOBS->POLICY_GENERATIONS)
|
||||
.detail("InterfaceId", id);
|
||||
|
||||
for (auto& result : results) {
|
||||
TraceEvent("GetTLogTeamWorker", functionId)
|
||||
.detail("Class", result.second.toString())
|
||||
.detail("Address", result.first.address())
|
||||
.detailext("Zone", result.first.locality.zoneId())
|
||||
.detailext("DataHall", result.first.locality.dataHallId())
|
||||
.detail("isExcludedServer", conf.isExcludedServer(result.first.address()))
|
||||
.detail("isAvailable", IFailureMonitor::failureMonitor().getState(result.first.storage.getEndpoint()).isAvailable());
|
||||
}
|
||||
|
||||
// Free the set
|
||||
logServerSet->clear();
|
||||
|
@ -420,7 +438,7 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
throw no_more_servers();
|
||||
}
|
||||
|
||||
vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForRoleInDatacenter(Optional<Standalone<StringRef>> const& dcId, ProcessClass::ClusterRole role, int amount, DatabaseConfiguration const& conf, std::map< Optional<Standalone<StringRef>>, int>& id_used, Optional<WorkerFitnessInfo> minWorker = Optional<WorkerFitnessInfo>(), bool checkStable = false ) {
|
||||
vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForRoleInDatacenter(Optional<Standalone<StringRef>> const& dcId, ProcessClass::ClusterRole role, int amount, DatabaseConfiguration const& conf, std::map< Optional<Standalone<StringRef>>, int>& id_used, WorkerFitnessInfo minWorker, bool checkStable = false ) {
|
||||
std::map<std::pair<ProcessClass::Fitness,int>, vector<std::pair<WorkerInterface, ProcessClass>>> fitness_workers;
|
||||
vector<std::pair<WorkerInterface, ProcessClass>> results;
|
||||
if (amount <= 0)
|
||||
|
@ -428,8 +446,7 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
|
||||
for( auto& it : id_worker ) {
|
||||
auto fitness = it.second.processClass.machineClassFitness( role );
|
||||
if( workerAvailable(it.second, checkStable) && !conf.isExcludedServer(it.second.interf.address()) && it.second.interf.locality.dcId() == dcId &&
|
||||
( !minWorker.present() || ( it.second.interf.id() != minWorker.get().worker.first.id() && ( fitness < minWorker.get().fitness || (fitness == minWorker.get().fitness && id_used[it.first] <= minWorker.get().used ) ) ) ) ) {
|
||||
if( workerAvailable(it.second, checkStable) && !conf.isExcludedServer(it.second.interf.address()) && it.second.interf.id() != minWorker.worker.first.id() && (fitness < minWorker.fitness || (fitness == minWorker.fitness && id_used[it.first] <= minWorker.used)) && it.second.interf.locality.dcId()==dcId ) {
|
||||
fitness_workers[ std::make_pair(fitness, id_used[it.first]) ].push_back(std::make_pair(it.second.interf, it.second.processClass));
|
||||
}
|
||||
}
|
||||
|
@ -549,161 +566,67 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
return result;
|
||||
}
|
||||
|
||||
RecruitRemoteFromConfigurationReply findRemoteWorkersForConfiguration( RecruitRemoteFromConfigurationRequest const& req ) {
|
||||
RecruitRemoteFromConfigurationReply result;
|
||||
std::map< Optional<Standalone<StringRef>>, int> id_used;
|
||||
id_used[masterProcessId]++;
|
||||
|
||||
std::set<Optional<Key>> remoteDC;
|
||||
remoteDC.insert(req.dcId);
|
||||
|
||||
auto remoteLogs = getWorkersForTlogs( req.configuration, id_used, false, remoteDC );
|
||||
for(int i = 0; i < remoteLogs.size(); i++) {
|
||||
result.remoteTLogs.push_back(remoteLogs[i].first);
|
||||
}
|
||||
|
||||
auto logRouters = getWorkersForRoleInDatacenter( req.dcId, ProcessClass::LogRouter, req.configuration.logRouterCount, req.configuration, id_used );
|
||||
for(int i = 0; i < logRouters.size(); i++) {
|
||||
result.logRouters.push_back(logRouters[i].first);
|
||||
}
|
||||
|
||||
//FIXME: fitness for logs is wrong
|
||||
if( now() - startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY &&
|
||||
( AcrossDatacenterFitness(remoteLogs) > AcrossDatacenterFitness((ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs()) ) ) {
|
||||
throw operation_failed();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
RecruitFromConfigurationReply findWorkersForConfiguration( RecruitFromConfigurationRequest const& req, Optional<Key> dcId ) {
|
||||
RecruitFromConfigurationReply findWorkersForConfiguration( RecruitFromConfigurationRequest const& req ) {
|
||||
RecruitFromConfigurationReply result;
|
||||
std::map< Optional<Standalone<StringRef>>, int> id_used;
|
||||
|
||||
id_used[masterProcessId]++;
|
||||
ASSERT(dcId == req.configuration.primaryDcId || dcId == req.configuration.remoteDcId);
|
||||
std::set<Optional<Key>> primaryDC;
|
||||
primaryDC.insert(dcId == req.configuration.primaryDcId ? req.configuration.primaryDcId : req.configuration.remoteDcId);
|
||||
result.remoteDcId = dcId == req.configuration.primaryDcId ? req.configuration.remoteDcId : req.configuration.primaryDcId;
|
||||
|
||||
auto tlogs = getWorkersForTlogs( req.configuration, id_used, false, primaryDC );
|
||||
for(int i = 0; i < tlogs.size(); i++) {
|
||||
auto tlogs = getWorkersForTlogsAcrossDatacenters( req.configuration, id_used );
|
||||
for(int i = 0; i < tlogs.size(); i++)
|
||||
result.tLogs.push_back(tlogs[i].first);
|
||||
}
|
||||
|
||||
if(req.configuration.satelliteTLogReplicationFactor > 0) {
|
||||
std::set<Optional<Key>> satelliteDCs;
|
||||
if( dcId == req.configuration.primaryDcId ) {
|
||||
satelliteDCs.insert( req.configuration.primarySatelliteDcIds.begin(), req.configuration.primarySatelliteDcIds.end() );
|
||||
} else {
|
||||
satelliteDCs.insert( req.configuration.remoteSatelliteDcIds.begin(), req.configuration.remoteSatelliteDcIds.end() );
|
||||
}
|
||||
auto satelliteLogs = getWorkersForTlogs( req.configuration, id_used, false, satelliteDCs );
|
||||
auto datacenters = getDatacenters( req.configuration );
|
||||
|
||||
for(int i = 0; i < satelliteLogs.size(); i++) {
|
||||
result.satelliteTLogs.push_back(satelliteLogs[i].first);
|
||||
InDatacenterFitness bestFitness;
|
||||
int numEquivalent = 1;
|
||||
|
||||
for(auto dcId : datacenters ) {
|
||||
auto used = id_used;
|
||||
auto first_resolver = getWorkerForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration, used );
|
||||
auto first_proxy = getWorkerForRoleInDatacenter( dcId, ProcessClass::Proxy, req.configuration, used );
|
||||
|
||||
auto proxies = getWorkersForRoleInDatacenter( dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies()-1, req.configuration, used, first_proxy );
|
||||
auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers()-1, req.configuration, used, first_resolver );
|
||||
|
||||
proxies.push_back(first_proxy.worker);
|
||||
resolvers.push_back(first_resolver.worker);
|
||||
|
||||
auto fitness = InDatacenterFitness(proxies, resolvers);
|
||||
if(fitness < bestFitness) {
|
||||
bestFitness = fitness;
|
||||
numEquivalent = 1;
|
||||
result.resolvers = vector<WorkerInterface>();
|
||||
result.proxies = vector<WorkerInterface>();
|
||||
for(int i = 0; i < resolvers.size(); i++)
|
||||
result.resolvers.push_back(resolvers[i].first);
|
||||
for(int i = 0; i < proxies.size(); i++)
|
||||
result.proxies.push_back(proxies[i].first);
|
||||
} else if( fitness == bestFitness && g_random->random01() < 1.0/++numEquivalent ) {
|
||||
result.resolvers = vector<WorkerInterface>();
|
||||
result.proxies = vector<WorkerInterface>();
|
||||
for(int i = 0; i < resolvers.size(); i++)
|
||||
result.resolvers.push_back(resolvers[i].first);
|
||||
for(int i = 0; i < proxies.size(); i++)
|
||||
result.proxies.push_back(proxies[i].first);
|
||||
}
|
||||
}
|
||||
|
||||
auto first_resolver = getWorkerForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration, id_used );
|
||||
auto first_proxy = getWorkerForRoleInDatacenter( dcId, ProcessClass::Proxy, req.configuration, id_used );
|
||||
ASSERT(bestFitness != InDatacenterFitness());
|
||||
|
||||
auto proxies = getWorkersForRoleInDatacenter( dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies()-1, req.configuration, id_used, first_proxy );
|
||||
auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers()-1, req.configuration, id_used, first_resolver );
|
||||
TraceEvent("findWorkersForConfig").detail("replication", req.configuration.tLogReplicationFactor)
|
||||
.detail("desiredLogs", req.configuration.getDesiredLogs()).detail("actualLogs", result.tLogs.size())
|
||||
.detail("desiredProxies", req.configuration.getDesiredProxies()).detail("actualProxies", result.proxies.size())
|
||||
.detail("desiredResolvers", req.configuration.getDesiredResolvers()).detail("actualResolvers", result.resolvers.size());
|
||||
|
||||
proxies.push_back(first_proxy.worker);
|
||||
resolvers.push_back(first_resolver.worker);
|
||||
|
||||
auto fitness = InDatacenterFitness(proxies, resolvers);
|
||||
for(int i = 0; i < resolvers.size(); i++)
|
||||
result.resolvers.push_back(resolvers[i].first);
|
||||
for(int i = 0; i < proxies.size(); i++)
|
||||
result.proxies.push_back(proxies[i].first);
|
||||
|
||||
//FIXME: fitness for logs is wrong
|
||||
if( now() - startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY &&
|
||||
( AcrossDatacenterFitness(tlogs) > AcrossDatacenterFitness((ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs()) ||
|
||||
fitness > InDatacenterFitness((ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_PROXY_FITNESS, (ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS, req.configuration.getDesiredProxies(), req.configuration.getDesiredResolvers()) ) ) {
|
||||
bestFitness > InDatacenterFitness((ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_PROXY_FITNESS, (ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS, req.configuration.getDesiredProxies(), req.configuration.getDesiredResolvers()) ) ) {
|
||||
throw operation_failed();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
RecruitFromConfigurationReply findWorkersForConfiguration( RecruitFromConfigurationRequest const& req ) {
|
||||
if(req.configuration.remoteTLogReplicationFactor > 0) {
|
||||
try {
|
||||
return findWorkersForConfiguration(req, req.configuration.primaryDcId);
|
||||
} catch( Error& e ) {
|
||||
if (e.code() == error_code_no_more_servers || e.code() == error_code_operation_failed) {
|
||||
TraceEvent(SevWarn, "AttemptingRecruitmentInRemoteDC", id).error(e);
|
||||
return findWorkersForConfiguration(req, req.configuration.remoteDcId);
|
||||
} else {
|
||||
throw;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
RecruitFromConfigurationReply result;
|
||||
std::map< Optional<Standalone<StringRef>>, int> id_used;
|
||||
id_used[masterProcessId]++;
|
||||
|
||||
auto tlogs = getWorkersForTlogs( req.configuration, id_used );
|
||||
for(int i = 0; i < tlogs.size(); i++) {
|
||||
result.tLogs.push_back(tlogs[i].first);
|
||||
}
|
||||
|
||||
auto datacenters = getDatacenters( req.configuration );
|
||||
|
||||
InDatacenterFitness bestFitness;
|
||||
int numEquivalent = 1;
|
||||
|
||||
for(auto dcId : datacenters ) {
|
||||
auto used = id_used;
|
||||
auto first_resolver = getWorkerForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration, used );
|
||||
auto first_proxy = getWorkerForRoleInDatacenter( dcId, ProcessClass::Proxy, req.configuration, used );
|
||||
|
||||
auto proxies = getWorkersForRoleInDatacenter( dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies()-1, req.configuration, used, first_proxy );
|
||||
auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers()-1, req.configuration, used, first_resolver );
|
||||
|
||||
proxies.push_back(first_proxy.worker);
|
||||
resolvers.push_back(first_resolver.worker);
|
||||
|
||||
auto fitness = InDatacenterFitness(proxies, resolvers);
|
||||
if(fitness < bestFitness) {
|
||||
bestFitness = fitness;
|
||||
numEquivalent = 1;
|
||||
result.resolvers = vector<WorkerInterface>();
|
||||
result.proxies = vector<WorkerInterface>();
|
||||
for(int i = 0; i < resolvers.size(); i++)
|
||||
result.resolvers.push_back(resolvers[i].first);
|
||||
for(int i = 0; i < proxies.size(); i++)
|
||||
result.proxies.push_back(proxies[i].first);
|
||||
} else if( fitness == bestFitness && g_random->random01() < 1.0/++numEquivalent ) {
|
||||
result.resolvers = vector<WorkerInterface>();
|
||||
result.proxies = vector<WorkerInterface>();
|
||||
for(int i = 0; i < resolvers.size(); i++)
|
||||
result.resolvers.push_back(resolvers[i].first);
|
||||
for(int i = 0; i < proxies.size(); i++)
|
||||
result.proxies.push_back(proxies[i].first);
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(bestFitness != InDatacenterFitness());
|
||||
|
||||
TraceEvent("findWorkersForConfig").detail("replication", req.configuration.tLogReplicationFactor)
|
||||
.detail("desiredLogs", req.configuration.getDesiredLogs()).detail("actualLogs", result.tLogs.size())
|
||||
.detail("desiredProxies", req.configuration.getDesiredProxies()).detail("actualProxies", result.proxies.size())
|
||||
.detail("desiredResolvers", req.configuration.getDesiredResolvers()).detail("actualResolvers", result.resolvers.size());
|
||||
|
||||
if( now() - startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY &&
|
||||
( AcrossDatacenterFitness(tlogs) > AcrossDatacenterFitness((ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs()) ||
|
||||
bestFitness > InDatacenterFitness((ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_PROXY_FITNESS, (ProcessClass::Fitness)SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS, req.configuration.getDesiredProxies(), req.configuration.getDesiredResolvers()) ) ) {
|
||||
throw operation_failed();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
bool betterMasterExists() {
|
||||
ServerDBInfo dbi = db.serverInfo->get();
|
||||
std::map< Optional<Standalone<StringRef>>, int> id_used;
|
||||
|
@ -728,16 +651,15 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
|
||||
if(oldMasterFit < newMasterFit) return false;
|
||||
|
||||
//FIXME: implement for remote logs and log routers
|
||||
std::vector<ProcessClass> tlogProcessClasses;
|
||||
for(auto& it : dbi.logSystemConfig.tLogs[0].tLogs ) {
|
||||
for(auto& it : dbi.logSystemConfig.tLogs ) {
|
||||
auto tlogWorker = id_worker.find(it.interf().locality.processId());
|
||||
if ( tlogWorker == id_worker.end() )
|
||||
return false;
|
||||
tlogProcessClasses.push_back(tlogWorker->second.processClass);
|
||||
}
|
||||
AcrossDatacenterFitness oldAcrossFit(dbi.logSystemConfig.tLogs[0].tLogs, tlogProcessClasses);
|
||||
AcrossDatacenterFitness newAcrossFit(getWorkersForTlogs(db.config, id_used, true));
|
||||
AcrossDatacenterFitness oldAcrossFit(dbi.logSystemConfig.tLogs, tlogProcessClasses);
|
||||
AcrossDatacenterFitness newAcrossFit(getWorkersForTlogsAcrossDatacenters(db.config, id_used, true));
|
||||
|
||||
if(oldAcrossFit < newAcrossFit) return false;
|
||||
|
||||
|
@ -797,10 +719,8 @@ std::vector<std::pair<WorkerInterface, ProcessClass>> getWorkersForTlogs( Databa
|
|||
Standalone<RangeResultRef> lastProcessClasses;
|
||||
bool gotProcessClasses;
|
||||
Optional<Standalone<StringRef>> masterProcessId;
|
||||
Optional<Standalone<StringRef>> masterDcId;
|
||||
UID id;
|
||||
std::vector<RecruitFromConfigurationRequest> outstandingRecruitmentRequests;
|
||||
std::vector<RecruitRemoteFromConfigurationRequest> outstandingRemoteRecruitmentRequests;
|
||||
std::vector<std::pair<RecruitStorageRequest, double>> outstandingStorageRequests;
|
||||
ActorCollection ac;
|
||||
UpdateWorkerList updateWorkerList;
|
||||
|
@ -857,7 +777,6 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
|
|||
rmq.lifetime = db->serverInfo->get().masterLifetime;
|
||||
|
||||
cluster->masterProcessId = masterWorker.first.locality.processId();
|
||||
cluster->masterDcId = masterWorker.first.locality.dcId();
|
||||
ErrorOr<MasterInterface> newMaster = wait( masterWorker.first.master.tryGetReply( rmq ) );
|
||||
if (newMaster.present()) {
|
||||
TraceEvent("CCWDB", cluster->id).detail("Recruited", newMaster.get().id());
|
||||
|
@ -1002,24 +921,6 @@ void checkOutstandingRecruitmentRequests( ClusterControllerData* self ) {
|
|||
}
|
||||
}
|
||||
|
||||
void checkOutstandingRemoteRecruitmentRequests( ClusterControllerData* self ) {
|
||||
for( int i = 0; i < self->outstandingRemoteRecruitmentRequests.size(); i++ ) {
|
||||
RecruitRemoteFromConfigurationRequest& req = self->outstandingRemoteRecruitmentRequests[i];
|
||||
try {
|
||||
req.reply.send( self->findRemoteWorkersForConfiguration( req ) );
|
||||
std::swap( self->outstandingRemoteRecruitmentRequests[i--], self->outstandingRemoteRecruitmentRequests.back() );
|
||||
self->outstandingRemoteRecruitmentRequests.pop_back();
|
||||
} catch (Error& e) {
|
||||
if (e.code() == error_code_no_more_servers || e.code() == error_code_operation_failed) {
|
||||
TraceEvent(SevWarn, "RecruitRemoteTLogMatchingSetNotAvailable", self->id).error(e);
|
||||
} else {
|
||||
TraceEvent(SevError, "RecruitRemoteTLogsRequestError", self->id).error(e);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void checkOutstandingStorageRequests( ClusterControllerData* self ) {
|
||||
for( int i = 0; i < self->outstandingStorageRequests.size(); i++ ) {
|
||||
auto& req = self->outstandingStorageRequests[i];
|
||||
|
@ -1053,15 +954,12 @@ void checkOutstandingStorageRequests( ClusterControllerData* self ) {
|
|||
|
||||
ACTOR Future<Void> doCheckOutstandingMasterRequests( ClusterControllerData* self ) {
|
||||
Void _ = wait( delay(SERVER_KNOBS->CHECK_BETTER_MASTER_INTERVAL) );
|
||||
//FIXME: re-enable betterMasterExists
|
||||
/*
|
||||
if (self->betterMasterExists()) {
|
||||
if (!self->db.forceMasterFailure.isSet()) {
|
||||
self->db.forceMasterFailure.send( Void() );
|
||||
TraceEvent("MasterRegistrationKill", self->id).detail("MasterId", self->db.serverInfo->get().master.id());
|
||||
}
|
||||
}
|
||||
*/
|
||||
return Void();
|
||||
}
|
||||
|
||||
|
@ -1074,7 +972,6 @@ void checkOutstandingMasterRequests( ClusterControllerData* self ) {
|
|||
|
||||
void checkOutstandingRequests( ClusterControllerData* self ) {
|
||||
checkOutstandingRecruitmentRequests( self );
|
||||
checkOutstandingRemoteRecruitmentRequests( self );
|
||||
checkOutstandingStorageRequests( self );
|
||||
checkOutstandingMasterRequests( self );
|
||||
}
|
||||
|
@ -1305,30 +1202,6 @@ ACTOR Future<Void> clusterRecruitFromConfiguration( ClusterControllerData* self,
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> clusterRecruitRemoteFromConfiguration( ClusterControllerData* self, RecruitRemoteFromConfigurationRequest req ) {
|
||||
// At the moment this doesn't really need to be an actor (it always completes immediately)
|
||||
TEST(true); //ClusterController RecruitTLogsRequest
|
||||
loop {
|
||||
try {
|
||||
req.reply.send( self->findRemoteWorkersForConfiguration( req ) );
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
if (e.code() == error_code_no_more_servers && now() - self->startTime >= SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY) {
|
||||
self->outstandingRemoteRecruitmentRequests.push_back( req );
|
||||
TraceEvent(SevWarn, "RecruitRemoteFromConfigurationNotAvailable", self->id).error(e);
|
||||
return Void();
|
||||
} else if(e.code() == error_code_operation_failed || e.code() == error_code_no_more_servers) {
|
||||
//recruitment not good enough, try again
|
||||
}
|
||||
else {
|
||||
TraceEvent(SevError, "RecruitRemoteFromConfigurationError", self->id).error(e);
|
||||
throw; // goodbye, cluster controller
|
||||
}
|
||||
}
|
||||
Void _ = wait( delay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY) );
|
||||
}
|
||||
}
|
||||
|
||||
void clusterRegisterMaster( ClusterControllerData* self, RegisterMasterRequest const& req ) {
|
||||
req.reply.send( Void() );
|
||||
|
||||
|
@ -1651,9 +1524,6 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
|
|||
when( RecruitFromConfigurationRequest req = waitNext( interf.recruitFromConfiguration.getFuture() ) ) {
|
||||
addActor.send( clusterRecruitFromConfiguration( &self, req ) );
|
||||
}
|
||||
when( RecruitRemoteFromConfigurationRequest req = waitNext( interf.recruitRemoteFromConfiguration.getFuture() ) ) {
|
||||
addActor.send( clusterRecruitRemoteFromConfiguration( &self, req ) );
|
||||
}
|
||||
when( RecruitStorageRequest req = waitNext( interf.recruitStorage.getFuture() ) ) {
|
||||
clusterRecruitStorage( &self, req );
|
||||
}
|
||||
|
|
|
@ -478,11 +478,12 @@ struct DDTeamCollection {
|
|||
PromiseStream<RelocateShard> const& output,
|
||||
Reference<ShardsAffectedByTeamFailure> const& shardsAffectedByTeamFailure,
|
||||
DatabaseConfiguration configuration,
|
||||
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges )
|
||||
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges,
|
||||
Future<Void> readyToStart )
|
||||
:cx(cx), masterId(masterId), lock(lock), output(output), shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams( true ), teamBuilder( Void() ),
|
||||
configuration(configuration), serverChanges(serverChanges),
|
||||
initialFailureReactionDelay( delay( BUGGIFY ? 0 : SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskDataDistribution ) ), healthyTeamCount( 0 ),
|
||||
initializationDoneActor(logOnCompletion(initialFailureReactionDelay, this)), optimalTeamCount( 0 ), recruitingStream(0), restartRecruiting( SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY ),
|
||||
initializationDoneActor(logOnCompletion(readyToStart && initialFailureReactionDelay, this)), optimalTeamCount( 0 ), recruitingStream(0), restartRecruiting( SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY ),
|
||||
unhealthyServers(0)
|
||||
{
|
||||
TraceEvent("DDTrackerStarting", masterId)
|
||||
|
@ -992,6 +993,7 @@ struct DDTeamCollection {
|
|||
state int teamsToBuild = desiredTeams - teamCount;
|
||||
|
||||
state vector<std::vector<UID>> builtTeams;
|
||||
|
||||
if( self->configuration.storageTeamSize > 3) {
|
||||
int addedTeams = self->addTeamsBestOf( teamsToBuild );
|
||||
TraceEvent("AddTeamsBestOf", self->masterId).detail("CurrentTeams", self->teams.size()).detail("AddedTeams", addedTeams);
|
||||
|
@ -1768,8 +1770,7 @@ ACTOR Future<Void> dataDistributionTeamCollection(
|
|||
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges,
|
||||
Future<Void> readyToStart )
|
||||
{
|
||||
state DDTeamCollection self( cx, masterId, lock, output, shardsAffectedByTeamFailure, configuration, serverChanges );
|
||||
|
||||
state DDTeamCollection self( cx, masterId, lock, output, shardsAffectedByTeamFailure, configuration, serverChanges, readyToStart );
|
||||
state Future<Void> loggingTrigger = Void();
|
||||
state PromiseStream<Void> serverRemoved;
|
||||
state Future<Void> interfaceChanges;
|
||||
|
@ -1879,54 +1880,6 @@ ACTOR Future<bool> isDataDistributionEnabled( Database cx ) {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<int> disableDataDistribution( Database cx ) {
|
||||
state Transaction tr(cx);
|
||||
state int oldMode = -1;
|
||||
state BinaryWriter wr(Unversioned());
|
||||
wr << 0;
|
||||
|
||||
loop {
|
||||
try {
|
||||
Optional<Value> old = wait( tr.get( dataDistributionModeKey ) );
|
||||
if (oldMode < 0) {
|
||||
oldMode = 1;
|
||||
if (old.present()) {
|
||||
BinaryReader rd(old.get(), Unversioned());
|
||||
rd >> oldMode;
|
||||
}
|
||||
}
|
||||
// SOMEDAY: Write a wrapper in MoveKeys.h
|
||||
BinaryWriter wrMyOwner(Unversioned()); wrMyOwner << dataDistributionModeLock;
|
||||
tr.set( moveKeysLockOwnerKey, wrMyOwner.toStringRef() );
|
||||
tr.set( dataDistributionModeKey, wr.toStringRef() );
|
||||
|
||||
Void _ = wait( tr.commit() );
|
||||
return oldMode;
|
||||
} catch (Error& e) {
|
||||
TraceEvent("disableDDModeRetrying").error(e);
|
||||
Void _ = wait ( tr.onError(e) );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> enableDataDistribution( Database cx, int mode ) {
|
||||
state Transaction tr(cx);
|
||||
state BinaryWriter wr(Unversioned());
|
||||
wr << mode;
|
||||
|
||||
loop {
|
||||
try {
|
||||
Optional<Value> old = wait( tr.get( dataDistributionModeKey ) );
|
||||
tr.set( dataDistributionModeKey, wr.toStringRef() );
|
||||
Void _ = wait( tr.commit() );
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
TraceEvent("enableDDModeRetrying").error(e);
|
||||
Void _ = wait( tr.onError(e) );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//Ensures that the serverKeys key space is properly coalesced
|
||||
//This method is only used for testing and is not implemented in a manner that is safe for large databases
|
||||
ACTOR Future<Void> debugCheckCoalescing(Database cx) {
|
||||
|
@ -2167,7 +2120,8 @@ DDTeamCollection* testTeamCollection(int teamSize, IRepPolicyRef policy, int pro
|
|||
PromiseStream<RelocateShard>(),
|
||||
Reference<ShardsAffectedByTeamFailure>(new ShardsAffectedByTeamFailure()),
|
||||
conf,
|
||||
PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>()
|
||||
PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>(),
|
||||
Future<Void>(Void())
|
||||
);
|
||||
|
||||
for(int id = 1; id <= processCount; id++) {
|
||||
|
|
|
@ -210,7 +210,4 @@ struct ShardSizeBounds {
|
|||
ShardSizeBounds getShardSizeBounds(KeyRangeRef shard, int64_t maxShardSize);
|
||||
|
||||
//Determines the maximum shard size based on the size of the database
|
||||
int64_t getMaxShardSize( double dbSizeEstimate );
|
||||
|
||||
Future<Void> enableDataDistribution( Database const& cx, int const& mode );
|
||||
Future<int> disableDataDistribution( Database const& cx );
|
||||
int64_t getMaxShardSize( double dbSizeEstimate );
|
|
@ -376,15 +376,6 @@ Future<Void> shardMerger(
|
|||
TEST(true); // shard to be merged
|
||||
ASSERT( keys.begin > allKeys.begin );
|
||||
|
||||
// We must not merge the keyServers shard
|
||||
if (keys.begin == keyServersPrefix) {
|
||||
TraceEvent(SevError, "LastShardMerge", self->masterId)
|
||||
.detail("ShardKeyBegin", printable(keys.begin))
|
||||
.detail("ShardKeyEnd", printable(keys.end))
|
||||
.detail("TrackerID", trackerId);
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
// This will merge shards both before and after "this" shard in keyspace.
|
||||
int shardsMerged = 1;
|
||||
bool forwardComplete = false;
|
||||
|
@ -394,7 +385,7 @@ Future<Void> shardMerger(
|
|||
loop {
|
||||
Optional<StorageMetrics> newMetrics;
|
||||
if( !forwardComplete ) {
|
||||
if( nextIter->range().end == keyServersPrefix ) {
|
||||
if( nextIter->range().end == allKeys.end ) {
|
||||
forwardComplete = true;
|
||||
continue;
|
||||
}
|
||||
|
@ -610,14 +601,6 @@ ACTOR Future<Void> trackInitialShards(DataDistributionTracker *self,
|
|||
state int lastBegin = -1;
|
||||
state vector<UID> last;
|
||||
|
||||
//The ending shard does not have a shardTracker, so instead just track the size of the shard
|
||||
Reference<AsyncVar<Optional<StorageMetrics>>> endShardSize( new AsyncVar<Optional<StorageMetrics>>() );
|
||||
KeyRangeRef endShardRange( keyServersPrefix, allKeys.end );
|
||||
ShardTrackedData endShardData;
|
||||
endShardData.stats = endShardSize;
|
||||
endShardData.trackBytes = trackShardBytes( self, endShardRange, endShardSize, g_random->randomUniqueID(), false );
|
||||
self->shards.insert( endShardRange, endShardData );
|
||||
|
||||
state int s;
|
||||
for(s=0; s<initData->shards.size(); s++) {
|
||||
state InitialDataDistribution::Team src = initData->shards[s].value.first;
|
||||
|
@ -637,8 +620,7 @@ ACTOR Future<Void> trackInitialShards(DataDistributionTracker *self,
|
|||
|
||||
if (lastBegin >= 0) {
|
||||
state KeyRangeRef keys( initData->shards[lastBegin].begin, initData->shards[s].begin );
|
||||
if (keys.begin < keyServersPrefix) // disallow spliting of keyServers shard
|
||||
restartShardTrackers( self, keys );
|
||||
restartShardTrackers( self, keys );
|
||||
shardsAffectedByTeamFailure->defineShard( keys );
|
||||
shardsAffectedByTeamFailure->moveShard( keys, last );
|
||||
}
|
||||
|
@ -648,7 +630,7 @@ ACTOR Future<Void> trackInitialShards(DataDistributionTracker *self,
|
|||
Void _ = wait( yield( TaskDataDistribution ) );
|
||||
}
|
||||
|
||||
Future<Void> initialSize = changeSizes( self, KeyRangeRef(allKeys.begin, keyServersPrefix), 0 );
|
||||
Future<Void> initialSize = changeSizes( self, KeyRangeRef(allKeys.begin, allKeys.end), 0 );
|
||||
self->readyToStart.send(Void());
|
||||
Void _ = wait( initialSize );
|
||||
self->maxShardSizeUpdater = updateMaxShardSize( self->cx->dbName, self->dbSizeEstimate, self->maxShardSize );
|
||||
|
|
|
@ -298,11 +298,11 @@ public:
|
|||
.detail("File0Size", self->files[0].size).detail("File1Size", self->files[1].size)
|
||||
.detail("File0Name", self->files[0].dbgFilename).detail("SyncedFiles", syncFiles.size());*/
|
||||
|
||||
committed.send(Void());
|
||||
if(g_random->random01() < 0.01) {
|
||||
//occasionally delete all the ready future in the AndFuture
|
||||
self->lastCommit.cleanup();
|
||||
}
|
||||
committed.send(Void());
|
||||
} catch (Error& e) {
|
||||
delete pageMem;
|
||||
TEST(true); // push error
|
||||
|
@ -405,8 +405,8 @@ public:
|
|||
TraceEvent("DiskQueueShutdownDeleting", self->dbgid)
|
||||
.detail("File0", self->filename(0))
|
||||
.detail("File1", self->filename(1));
|
||||
Void _ = wait( IAsyncFileSystem::filesystem()->deleteFile( self->filename(0), false ) );
|
||||
Void _ = wait( IAsyncFileSystem::filesystem()->deleteFile( self->filename(1), true ) );
|
||||
Void _ = wait( IAsyncFile::incrementalDelete( self->filename(0), false ) );
|
||||
Void _ = wait( IAsyncFile::incrementalDelete( self->filename(1), true ) );
|
||||
}
|
||||
TraceEvent("DiskQueueShutdownComplete", self->dbgid)
|
||||
.detail("DeleteFiles", deleteFiles)
|
||||
|
@ -419,8 +419,8 @@ public:
|
|||
}
|
||||
|
||||
if( error.code() != error_code_actor_cancelled ) {
|
||||
if (!self->stopped.isSet()) self->stopped.send(Void());
|
||||
if (!self->error.isSet()) self->error.send(Never());
|
||||
if (self->stopped.canBeSet()) self->stopped.send(Void());
|
||||
if (self->error.canBeSet()) self->error.send(Never());
|
||||
delete self;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,8 +22,8 @@
|
|||
#include "IKeyValueStore.h"
|
||||
#include "IDiskQueue.h"
|
||||
#include "flow/IndexedSet.h"
|
||||
#include "flow/Notified.h"
|
||||
#include "flow/ActorCollection.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
|
||||
#define OP_DISK_OVERHEAD (sizeof(OpHeader) + 1)
|
||||
|
@ -705,4 +705,4 @@ IKeyValueStore* keyValueStoreMemory( std::string const& basename, UID logID, int
|
|||
|
||||
IKeyValueStore* keyValueStoreLogSystem( class IDiskQueue* queue, UID logID, int64_t memoryLimit, bool disableSnapshot ) {
|
||||
return new KeyValueStoreMemory( queue, logID, memoryLimit, disableSnapshot );
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1823,8 +1823,8 @@ private:
|
|||
self->logging.cancel();
|
||||
Void _ = wait( self->readThreads->stop() && self->writeThread->stop() );
|
||||
if (deleteOnClose) {
|
||||
Void _ = wait( IAsyncFileSystem::filesystem()->deleteFile( self->filename, true ) );
|
||||
Void _ = wait( IAsyncFileSystem::filesystem()->deleteFile( self->filename + "-wal", false ) );
|
||||
Void _ = wait( IAsyncFile::incrementalDelete( self->filename, true ) );
|
||||
Void _ = wait( IAsyncFile::incrementalDelete( self->filename + "-wal", false ) );
|
||||
}
|
||||
} catch (Error& e) {
|
||||
TraceEvent(SevError, "KVDoCloseError", self->logID)
|
||||
|
|
|
@ -229,6 +229,7 @@ struct ILogSystem {
|
|||
bool parallelGetMore;
|
||||
int sequence;
|
||||
Deque<Future<TLogPeekReply>> futureResults;
|
||||
Future<Void> interfaceChanged;
|
||||
|
||||
ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>> const& interf, Tag tag, Version begin, Version end, bool returnIfBlocked, bool parallelGetMore );
|
||||
|
||||
|
|
|
@ -128,6 +128,10 @@ ACTOR Future<Void> serverPeekParallelGetMore( ILogSystem::ServerPeekCursor* self
|
|||
throw internal_error();
|
||||
}
|
||||
|
||||
if(!self->interfaceChanged.isValid()) {
|
||||
self->interfaceChanged = self->interf->onChange();
|
||||
}
|
||||
|
||||
loop {
|
||||
try {
|
||||
while(self->futureResults.size() < SERVER_KNOBS->PARALLEL_GET_MORE_REQUESTS && self->interf->get().present()) {
|
||||
|
@ -148,7 +152,9 @@ ACTOR Future<Void> serverPeekParallelGetMore( ILogSystem::ServerPeekCursor* self
|
|||
//TraceEvent("SPC_getMoreB", self->randomID).detail("has", self->hasMessage()).detail("end", res.end).detail("popped", res.popped.present() ? res.popped.get() : 0);
|
||||
return Void();
|
||||
}
|
||||
when( Void _ = wait( self->interf->onChange() ) ) {
|
||||
when( Void _ = wait( self->interfaceChanged ) ) {
|
||||
self->interfaceChanged = self->interf->onChange();
|
||||
self->randomID = g_random->randomUniqueID();
|
||||
self->sequence = 0;
|
||||
self->futureResults.clear();
|
||||
}
|
||||
|
@ -159,6 +165,7 @@ ACTOR Future<Void> serverPeekParallelGetMore( ILogSystem::ServerPeekCursor* self
|
|||
return Void();
|
||||
} else if(e.code() == error_code_timed_out) {
|
||||
TraceEvent("PeekCursorTimedOut", self->randomID);
|
||||
self->interfaceChanged = self->interf->onChange();
|
||||
self->randomID = g_random->randomUniqueID();
|
||||
self->sequence = 0;
|
||||
self->futureResults.clear();
|
||||
|
|
|
@ -31,9 +31,9 @@
|
|||
#include "LogSystemDiskQueueAdapter.h"
|
||||
#include "IKeyValueStore.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "flow/Notified.h"
|
||||
#include "fdbrpc/sim_validation.h"
|
||||
#include "fdbrpc/batcher.actor.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "fdbclient/KeyRangeMap.h"
|
||||
#include "ConflictSet.h"
|
||||
#include "flow/Stats.h"
|
||||
|
@ -1003,17 +1003,49 @@ ACTOR static Future<Void> readRequestServer(
|
|||
TraceEvent("ProxyReadyForReads", proxy.id());
|
||||
|
||||
loop choose{
|
||||
when(ReplyPromise<vector<StorageServerInterface>> req = waitNext(proxy.getKeyServersLocations.getFuture())) {
|
||||
// SOMEDAY: keep ssis around?
|
||||
vector<UID> src, dest;
|
||||
decodeKeyServersValue(commitData->txnStateStore->readValue(keyServersKeyServersKey).get().get(), src, dest);
|
||||
vector<StorageServerInterface> ssis;
|
||||
ssis.reserve(src.size());
|
||||
for (auto const& id : src) {
|
||||
ssis.push_back(decodeServerListValue(commitData->txnStateStore->readValue(serverListKeyFor(id)).get().get()));
|
||||
when(ReplyPromise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>> req = waitNext(proxy.getKeyServersLocations.getFuture())) {
|
||||
Standalone<VectorRef<KeyValueRef>> keyServersBegin = commitData->txnStateStore->readRange(KeyRangeRef(allKeys.begin, keyServersKeyServersKeys.begin), -1).get();
|
||||
Standalone<VectorRef<KeyValueRef>> keyServersEnd = commitData->txnStateStore->readRange(KeyRangeRef(keyServersKeyServersKeys.end, allKeys.end), 2).get();
|
||||
Standalone<VectorRef<KeyValueRef>> keyServersShardBoundaries = commitData->txnStateStore->readRange(KeyRangeRef(keyServersBegin[0].key, keyServersEnd[1].key)).get();
|
||||
|
||||
Standalone<VectorRef<KeyValueRef>> serverListBegin = commitData->txnStateStore->readRange(KeyRangeRef(allKeys.begin, keyServersKey(serverListKeys.begin)), -1).get();
|
||||
Standalone<VectorRef<KeyValueRef>> serverListEnd = commitData->txnStateStore->readRange(KeyRangeRef(keyServersKey(serverListKeys.end), allKeys.end), 2).get();
|
||||
Standalone<VectorRef<KeyValueRef>> serverListShardBoundaries = commitData->txnStateStore->readRange(KeyRangeRef(serverListBegin[0].key, serverListEnd[1].key)).get();
|
||||
|
||||
bool ignoreFirstServerListShard = false;
|
||||
if (keyServersShardBoundaries.back().key > serverListShardBoundaries.front().key)
|
||||
ignoreFirstServerListShard = true;
|
||||
|
||||
// shards include all keyServers and serverLists information
|
||||
vector<pair<KeyRangeRef, vector<StorageServerInterface>>> shards;
|
||||
int reserveSize = keyServersShardBoundaries.size() + serverListShardBoundaries.size() - 2 - (ignoreFirstServerListShard ? 1 : 0);
|
||||
shards.reserve(reserveSize);
|
||||
|
||||
for (int i = 0; i < keyServersShardBoundaries.size() - 1; i++) {
|
||||
vector<UID> src, dest;
|
||||
decodeKeyServersValue(keyServersShardBoundaries[i].value, src, dest);
|
||||
vector<StorageServerInterface> ssis;
|
||||
ssis.reserve(src.size());
|
||||
for (auto const& id : src) {
|
||||
ssis.push_back(decodeServerListValue(commitData->txnStateStore->readValue(serverListKeyFor(id)).get().get()));
|
||||
}
|
||||
|
||||
shards.push_back(std::make_pair(KeyRangeRef(keyServersShardBoundaries[i].key.removePrefix(keyServersPrefix), keyServersShardBoundaries[i + 1].key.removePrefix(keyServersPrefix)), ssis));
|
||||
}
|
||||
|
||||
req.send(ssis);
|
||||
for (int i = ignoreFirstServerListShard ? 1 : 0 ; i < serverListShardBoundaries.size() - 1; i++) {
|
||||
vector<UID> src, dest;
|
||||
decodeKeyServersValue(serverListShardBoundaries[i].value, src, dest);
|
||||
vector<StorageServerInterface> ssis;
|
||||
ssis.reserve(src.size());
|
||||
for (auto const& id : src) {
|
||||
ssis.push_back(decodeServerListValue(commitData->txnStateStore->readValue(serverListKeyFor(id)).get().get()));
|
||||
}
|
||||
|
||||
shards.push_back(std::make_pair(KeyRangeRef(serverListShardBoundaries[i].key.removePrefix(keyServersPrefix), serverListShardBoundaries[i + 1].key.removePrefix(keyServersPrefix)), ssis));
|
||||
}
|
||||
|
||||
req.send(shards);
|
||||
}
|
||||
when(GetStorageServerRejoinInfoRequest req = waitNext(proxy.getStorageServerRejoinInfo.getFuture())) {
|
||||
if (commitData->txnStateStore->readValue(serverListKeyFor(req.id)).get().present()) {
|
||||
|
|
|
@ -891,8 +891,7 @@ void seedShardServers(
|
|||
|
||||
// We have to set this range in two blocks, because the master tracking of "keyServersLocations" depends on a change to a specific
|
||||
// key (keyServersKeyServersKey)
|
||||
krmSetPreviouslyEmptyRange( tr, arena, keyServersPrefix, KeyRangeRef(KeyRef(), keyServersPrefix), keyServersValue( serverIds ), Value() );
|
||||
krmSetPreviouslyEmptyRange( tr, arena, keyServersPrefix, KeyRangeRef(keyServersPrefix, allKeys.end), keyServersValue( serverIds ), Value() );
|
||||
krmSetPreviouslyEmptyRange( tr, arena, keyServersPrefix, KeyRangeRef(KeyRef(), allKeys.end), keyServersValue( serverIds ), Value() );
|
||||
|
||||
for(int s=0; s<servers.size(); s++)
|
||||
krmSetPreviouslyEmptyRange( tr, arena, serverKeysPrefixFor( servers[s].id() ), allKeys, serverKeysTrue, serverKeysFalse );
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -27,8 +27,8 @@
|
|||
#elif !defined(FDBSERVER_ORDERER_ACTOR_H)
|
||||
#define FDBSERVER_ORDERER_ACTOR_H
|
||||
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "flow/actorcompiler.h"
|
||||
#include "flow/Notified.h"
|
||||
|
||||
template <class Seq>
|
||||
class Orderer {
|
||||
|
@ -71,4 +71,4 @@ private:
|
|||
Promise<Void> shutdown; // Never set, only broken on destruction
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -139,6 +139,7 @@ ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, boo
|
|||
state Transaction tr( cx );
|
||||
if (use_system_priority)
|
||||
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
loop {
|
||||
try {
|
||||
Standalone<RangeResultRef> serverList = wait( tr.getRange( serverListKeys, CLIENT_KNOBS->TOO_MANY ) );
|
||||
|
|
|
@ -204,8 +204,8 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
loop {
|
||||
auto waitTime = SERVER_KNOBS->MIN_REBOOT_TIME + (SERVER_KNOBS->MAX_REBOOT_TIME - SERVER_KNOBS->MIN_REBOOT_TIME) * g_random->random01();
|
||||
cycles ++;
|
||||
TraceEvent("SimulatedFDBDWait").detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("ProcessAddress", NetworkAddress(ip, port, true, false))
|
||||
TraceEvent("SimulatedFDBDPreWait").detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("Address", NetworkAddress(ip, port, true, false))
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detail("waitTime", waitTime).detail("Port", port);
|
||||
|
||||
|
@ -219,10 +219,10 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
TraceEvent("SimulatedRebooterStarting", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detailext("DataHall", localities.dataHallId())
|
||||
.detail("ProcessAddress", process->address.toString())
|
||||
.detail("ProcessExcluded", process->excluded)
|
||||
.detail("Address", process->address.toString())
|
||||
.detail("Excluded", process->excluded)
|
||||
.detail("UsingSSL", useSSL);
|
||||
TraceEvent("ProgramStart").detail("Cycles", cycles)
|
||||
TraceEvent("ProgramStart").detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("SourceVersion", getHGVersion())
|
||||
.detail("Version", FDB_VT_VERSION)
|
||||
.detail("PackageName", FDB_VT_PACKAGE_NAME)
|
||||
|
@ -248,7 +248,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
} catch (Error& e) {
|
||||
// If in simulation, if we make it here with an error other than io_timeout but enASIOTimedOut is set then somewhere an io_timeout was converted to a different error.
|
||||
if(g_network->isSimulated() && e.code() != error_code_io_timeout && (bool)g_network->global(INetwork::enASIOTimedOut))
|
||||
TraceEvent(SevError, "IOTimeoutErrorSuppressed").detail("ErrorCode", e.code()).backtrace();
|
||||
TraceEvent(SevError, "IOTimeoutErrorSuppressed").detail("ErrorCode", e.code()).detail("RandomId", randomId).backtrace();
|
||||
|
||||
if (onShutdown.isReady() && onShutdown.isError()) throw onShutdown.getError();
|
||||
if(e.code() != error_code_actor_cancelled)
|
||||
|
@ -258,15 +258,15 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
}
|
||||
|
||||
TraceEvent("SimulatedFDBDDone", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("ProcessAddress", process->address)
|
||||
.detail("ProcessExcluded", process->excluded)
|
||||
.detail("Address", process->address)
|
||||
.detail("Excluded", process->excluded)
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detail("KillType", onShutdown.isReady() ? onShutdown.get() : ISimulator::None);
|
||||
|
||||
if (!onShutdown.isReady())
|
||||
onShutdown = ISimulator::InjectFaults;
|
||||
} catch (Error& e) {
|
||||
TraceEvent(destructed ? SevInfo : SevError, "SimulatedFDBDRebooterError", localities.zoneId()).error(e, true);
|
||||
TraceEvent(destructed ? SevInfo : SevError, "SimulatedFDBDRebooterError", localities.zoneId()).detail("RandomId", randomId).error(e, true);
|
||||
onShutdown = e;
|
||||
}
|
||||
|
||||
|
@ -276,6 +276,11 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
process->rebooting = true;
|
||||
process->shutdownSignal.send(ISimulator::None);
|
||||
}
|
||||
TraceEvent("SimulatedFDBDWait", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("Address", process->address)
|
||||
.detail("Excluded", process->excluded)
|
||||
.detail("Rebooting", process->rebooting)
|
||||
.detailext("ZoneId", localities.zoneId());
|
||||
Void _ = wait( g_simulator.onProcess( simProcess ) );
|
||||
|
||||
Void _ = wait(delay(0.00001 + FLOW_KNOBS->MAX_BUGGIFIED_DELAY)); // One last chance for the process to clean up?
|
||||
|
@ -284,15 +289,15 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
|
||||
auto shutdownResult = onShutdown.get();
|
||||
TraceEvent("SimulatedFDBDShutdown", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("ProcessAddress", process->address)
|
||||
.detail("ProcessExcluded", process->excluded)
|
||||
.detail("Address", process->address)
|
||||
.detail("Excluded", process->excluded)
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detail("KillType", shutdownResult);
|
||||
|
||||
if( shutdownResult < ISimulator::RebootProcessAndDelete ) {
|
||||
TraceEvent("SimulatedFDBDLowerReboot", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("ProcessAddress", process->address)
|
||||
.detail("ProcessExcluded", process->excluded)
|
||||
.detail("Address", process->address)
|
||||
.detail("Excluded", process->excluded)
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detail("KillType", shutdownResult);
|
||||
return onShutdown.get();
|
||||
|
@ -300,7 +305,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
|
||||
if( onShutdown.get() == ISimulator::RebootProcessAndDelete ) {
|
||||
TraceEvent("SimulatedFDBDRebootAndDelete", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("ProcessAddress", process->address)
|
||||
.detail("Address", process->address)
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detail("KillType", shutdownResult);
|
||||
*coordFolder = joinPath(baseFolder, g_random->randomUniqueID().toString());
|
||||
|
@ -317,7 +322,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
|
|||
}
|
||||
else {
|
||||
TraceEvent("SimulatedFDBDJustRepeat", localities.zoneId()).detail("Cycles", cycles).detail("RandomId", randomId)
|
||||
.detail("ProcessAddress", process->address)
|
||||
.detail("Address", process->address)
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detail("KillType", shutdownResult);
|
||||
}
|
||||
|
@ -351,6 +356,7 @@ ACTOR Future<Void> simulatedMachine(
|
|||
state int bootCount = 0;
|
||||
state std::vector<std::string> myFolders;
|
||||
state std::vector<std::string> coordFolders;
|
||||
state UID randomId = g_nondeterministic_random->randomUniqueID();
|
||||
|
||||
try {
|
||||
CSimpleIni ini;
|
||||
|
@ -387,6 +393,7 @@ ACTOR Future<Void> simulatedMachine(
|
|||
std::string path = joinPath(myFolders[i], "fdb.cluster");
|
||||
Reference<ClusterConnectionFile> clusterFile(useSeedFile ? new ClusterConnectionFile(path, connStr.toString()) : new ClusterConnectionFile(path));
|
||||
processes.push_back(simulatedFDBDRebooter(clusterFile, ips[i], sslEnabled, i + 1, localities, processClass, &myFolders[i], &coordFolders[i], baseFolder, connStr, useSeedFile, runBackupAgents));
|
||||
TraceEvent("SimulatedMachineProcess", randomId).detail("Address", NetworkAddress(ips[i], i+1, true, false)).detailext("ZoneId", localities.zoneId()).detailext("DataHall", localities.dataHallId()).detail("Folder", myFolders[i]);
|
||||
}
|
||||
|
||||
TEST( bootCount >= 1 ); // Simulated machine rebooted
|
||||
|
@ -394,7 +401,7 @@ ACTOR Future<Void> simulatedMachine(
|
|||
TEST( bootCount >= 3 ); // Simulated machine rebooted three times
|
||||
++bootCount;
|
||||
|
||||
TraceEvent("SimulatedMachineStart")
|
||||
TraceEvent("SimulatedMachineStart", randomId)
|
||||
.detail("Folder0", myFolders[0])
|
||||
.detail("CFolder0", coordFolders[0])
|
||||
.detail("MachineIPs", toIPVectorString(ips))
|
||||
|
@ -410,7 +417,7 @@ ACTOR Future<Void> simulatedMachine(
|
|||
|
||||
Void _ = wait( waitForAll( processes ) );
|
||||
|
||||
TraceEvent("SimulatedMachineRebootStart")
|
||||
TraceEvent("SimulatedMachineRebootStart", randomId)
|
||||
.detail("Folder0", myFolders[0])
|
||||
.detail("CFolder0", coordFolders[0])
|
||||
.detail("MachineIPs", toIPVectorString(ips))
|
||||
|
@ -442,7 +449,12 @@ ACTOR Future<Void> simulatedMachine(
|
|||
ASSERT( it.second.isReady() && !it.second.isError() );
|
||||
}
|
||||
|
||||
TraceEvent("SimulatedMachineRebootAfterKills")
|
||||
for( auto it : g_simulator.getMachineById(localities.zoneId())->deletingFiles ) {
|
||||
filenames.insert( it );
|
||||
closingStr += it + ", ";
|
||||
}
|
||||
|
||||
TraceEvent("SimulatedMachineRebootAfterKills", randomId)
|
||||
.detail("Folder0", myFolders[0])
|
||||
.detail("CFolder0", coordFolders[0])
|
||||
.detail("MachineIPs", toIPVectorString(ips))
|
||||
|
@ -471,12 +483,12 @@ ACTOR Future<Void> simulatedMachine(
|
|||
openFiles += *it + ", ";
|
||||
i++;
|
||||
}
|
||||
TraceEvent("MachineFilesOpen").detail("PAddr", toIPVectorString(ips)).detail("OpenFiles", openFiles);
|
||||
TraceEvent("MachineFilesOpen", randomId).detail("PAddr", toIPVectorString(ips)).detail("OpenFiles", openFiles);
|
||||
} else
|
||||
break;
|
||||
|
||||
if( shutdownDelayCount++ >= 50 ) { // Worker doesn't shut down instantly on reboot
|
||||
TraceEvent(SevError, "SimulatedFDBDFilesCheck")
|
||||
TraceEvent(SevError, "SimulatedFDBDFilesCheck", randomId)
|
||||
.detail("PAddrs", toIPVectorString(ips))
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detailext("DataHall", localities.dataHallId());
|
||||
|
@ -487,8 +499,8 @@ ACTOR Future<Void> simulatedMachine(
|
|||
backoff = std::min( backoff + 1.0, 6.0 );
|
||||
}
|
||||
|
||||
TraceEvent("SimulatedFDBDFilesClosed")
|
||||
.detail("ProcessAddress", toIPVectorString(ips))
|
||||
TraceEvent("SimulatedFDBDFilesClosed", randomId)
|
||||
.detail("Address", toIPVectorString(ips))
|
||||
.detailext("ZoneId", localities.zoneId())
|
||||
.detailext("DataHall", localities.dataHallId());
|
||||
|
||||
|
@ -510,7 +522,7 @@ ACTOR Future<Void> simulatedMachine(
|
|||
|
||||
auto rebootTime = g_random->random01() * MACHINE_REBOOT_TIME;
|
||||
|
||||
TraceEvent("SimulatedMachineShutdown")
|
||||
TraceEvent("SimulatedMachineShutdown", randomId)
|
||||
.detail("Swap", swap)
|
||||
.detail("KillType", killType)
|
||||
.detail("RebootTime", rebootTime)
|
||||
|
@ -530,7 +542,7 @@ ACTOR Future<Void> simulatedMachine(
|
|||
|
||||
if( myFolders != toRebootFrom ) {
|
||||
TEST( true ); // Simulated machine swapped data folders
|
||||
TraceEvent("SimulatedMachineFolderSwap")
|
||||
TraceEvent("SimulatedMachineFolderSwap", randomId)
|
||||
.detail("OldFolder0", myFolders[0]).detail("NewFolder0", toRebootFrom[0])
|
||||
.detail("MachineIPs", toIPVectorString(ips));
|
||||
}
|
||||
|
@ -648,93 +660,157 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>> *systemActors, st
|
|||
return Void();
|
||||
}
|
||||
|
||||
std::string randomConfiguration( int physicalDatacenters ) {
|
||||
int r = std::min(g_random->randomInt(0, 6), 3);
|
||||
// r = 1; //ahm
|
||||
struct SimulationConfig {
|
||||
explicit SimulationConfig(int extraDB);
|
||||
int extraDB;
|
||||
|
||||
// See also random configuration choices in ConfigureDatabase workload
|
||||
DatabaseConfiguration db;
|
||||
|
||||
std::string startingConfig = "new";
|
||||
if (r == 0) {
|
||||
void set_config(std::string config);
|
||||
|
||||
// Simulation layout
|
||||
int datacenters;
|
||||
int machine_count; // Total, not per DC.
|
||||
int processes_per_machine;
|
||||
int coordinators;
|
||||
|
||||
std::string toString();
|
||||
|
||||
private:
|
||||
void generateNormalConfig();
|
||||
};
|
||||
|
||||
SimulationConfig::SimulationConfig(int extraDB) : extraDB(extraDB) {
|
||||
generateNormalConfig();
|
||||
}
|
||||
|
||||
void SimulationConfig::set_config(std::string config) {
|
||||
// The only mechanism we have for turning "single" into what single means
|
||||
// is buildConfiguration()... :/
|
||||
std::map<std::string, std::string> hack_map;
|
||||
ASSERT( buildConfiguration(config, hack_map) );
|
||||
for(auto kv : hack_map) db.set( kv.first, kv.second );
|
||||
}
|
||||
|
||||
StringRef StringRefOf(const char* s) {
|
||||
return StringRef((uint8_t*)s, strlen(s));
|
||||
}
|
||||
|
||||
void SimulationConfig::generateNormalConfig() {
|
||||
set_config("new");
|
||||
datacenters = g_random->randomInt( 1, 4 );
|
||||
if (g_random->random01() < 0.25) db.desiredTLogCount = g_random->randomInt(1,7);
|
||||
if (g_random->random01() < 0.25) db.masterProxyCount = g_random->randomInt(1,7);
|
||||
if (g_random->random01() < 0.25) db.resolverCount = g_random->randomInt(1,7);
|
||||
if (g_random->random01() < 0.5) {
|
||||
set_config("ssd");
|
||||
} else {
|
||||
set_config("memory");
|
||||
}
|
||||
|
||||
int replication_type = std::min(g_random->randomInt( 1, 6 ), 3);
|
||||
//replication_type = 1; //ahm
|
||||
switch (replication_type) {
|
||||
case 0: {
|
||||
TEST( true ); // Simulated cluster using custom redundancy mode
|
||||
int storage_replicas = g_random->randomInt(1,5);
|
||||
startingConfig += " storage_replicas:=" + format("%d", storage_replicas);
|
||||
startingConfig += " storage_quorum:=" + format("%d", storage_replicas);
|
||||
int log_replicas = g_random->randomInt(1,5);
|
||||
startingConfig += " log_replicas:=" + format("%d", log_replicas);
|
||||
int log_anti_quorum = g_random->randomInt(0, log_replicas);
|
||||
startingConfig += " log_anti_quorum:=" + format("%d", log_anti_quorum);
|
||||
startingConfig += " replica_datacenters:=1";
|
||||
startingConfig += " min_replica_datacenters:=1";
|
||||
int storage_servers = g_random->randomInt(1,5);
|
||||
int replication_factor = g_random->randomInt(1,5);
|
||||
int anti_quorum = g_random->randomInt(0, db.tLogReplicationFactor);
|
||||
// Go through buildConfiguration, as it sets tLogPolicy/storagePolicy.
|
||||
set_config(format("storage_replicas:=%d storage_quorum:=%d "
|
||||
"log_replicas:=%d log_anti_quorum:=%1 "
|
||||
"replica_datacenters:=1 min_replica_datacenters:=1",
|
||||
storage_servers, storage_servers,
|
||||
replication_factor, anti_quorum));
|
||||
break;
|
||||
}
|
||||
else if (r == 1) {
|
||||
case 1: {
|
||||
TEST( true ); // Simulated cluster running in single redundancy mode
|
||||
startingConfig += " single";
|
||||
set_config("single");
|
||||
break;
|
||||
}
|
||||
else if( r == 2 ) {
|
||||
case 2: {
|
||||
TEST( true ); // Simulated cluster running in double redundancy mode
|
||||
startingConfig += " double";
|
||||
set_config("double");
|
||||
break;
|
||||
}
|
||||
else if( r == 3 ) {
|
||||
if( physicalDatacenters == 1 ) {
|
||||
case 3: {
|
||||
if( datacenters == 1 ) {
|
||||
TEST( true ); // Simulated cluster running in triple redundancy mode
|
||||
startingConfig += " triple";
|
||||
set_config("triple");
|
||||
}
|
||||
else if( physicalDatacenters == 2 ) {
|
||||
else if( datacenters == 2 ) {
|
||||
TEST( true ); // Simulated cluster running in 2 datacenter mode
|
||||
startingConfig += " two_datacenter";
|
||||
set_config("two_datacenter");
|
||||
}
|
||||
else if( physicalDatacenters == 3 ) {
|
||||
else if( datacenters == 3 ) {
|
||||
TEST( true ); // Simulated cluster running in 3 data-hall mode
|
||||
startingConfig += " three_data_hall";
|
||||
set_config("three_data_hall");
|
||||
}
|
||||
else {
|
||||
ASSERT( false );
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
ASSERT(false); // Programmer forgot to adjust cases.
|
||||
}
|
||||
|
||||
if (g_random->random01() < 0.25) startingConfig += " logs=" + format("%d", g_random->randomInt(1,7));
|
||||
if (g_random->random01() < 0.25) startingConfig += " proxies=" + format("%d", g_random->randomInt(1,7));
|
||||
if (g_random->random01() < 0.25) startingConfig += " resolvers=" + format("%d", g_random->randomInt(1,7));
|
||||
machine_count = g_random->randomInt( std::max( 2+datacenters, db.minMachinesRequired() ), extraDB ? 6 : 10 );
|
||||
processes_per_machine = g_random->randomInt(1, (extraDB ? 14 : 28)/machine_count + 2 );
|
||||
coordinators = BUGGIFY ? g_random->randomInt(1, machine_count+1) : std::min( machine_count, db.maxMachineFailuresTolerated()*2 + 1 );
|
||||
}
|
||||
|
||||
startingConfig += g_random->random01() < 0.5 ? " ssd" : " memory";
|
||||
return startingConfig;
|
||||
std::string SimulationConfig::toString() {
|
||||
std::stringstream config;
|
||||
std::map<std::string, std::string>&& dbconfig = db.toMap();
|
||||
config << "new";
|
||||
|
||||
if (dbconfig["redundancy_mode"] != "custom") {
|
||||
config << " " << dbconfig["redundancy_mode"];
|
||||
} else {
|
||||
config << " " << "log_replicas:=" << db.tLogReplicationFactor;
|
||||
config << " " << "log_anti_quorum:=" << db.tLogWriteAntiQuorum;
|
||||
config << " " << "storage_replicas:=" << db.storageTeamSize;
|
||||
config << " " << "storage_quorum:=" << db.durableStorageQuorum;
|
||||
}
|
||||
|
||||
config << " logs=" << db.getDesiredLogs();
|
||||
config << " proxies=" << db.getDesiredProxies();
|
||||
config << " resolvers=" << db.getDesiredResolvers();
|
||||
|
||||
config << " " << dbconfig["storage_engine"];
|
||||
return config.str();
|
||||
}
|
||||
|
||||
void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseFolder,
|
||||
int* pTesterCount, Optional<ClusterConnectionString> *pConnString,
|
||||
Standalone<StringRef> *pStartingConfiguration, int extraDB)
|
||||
{
|
||||
int dataCenters = g_random->randomInt( 1, 4 );
|
||||
|
||||
// SOMEDAY: this does not test multi-interface configurations
|
||||
std::string startingConfigString = randomConfiguration(dataCenters);
|
||||
std::map<std::string,std::string> startingConfigMap;
|
||||
ASSERT( buildConfiguration( startingConfigString, startingConfigMap ) == ConfigurationResult::SUCCESS );
|
||||
SimulationConfig simconfig(extraDB);
|
||||
std::string startingConfigString = simconfig.toString();
|
||||
|
||||
DatabaseConfiguration startingConfig;
|
||||
for(auto kv : startingConfigMap) startingConfig.set( kv.first, kv.second );
|
||||
g_simulator.storagePolicy = startingConfig.storagePolicy;
|
||||
g_simulator.tLogPolicy = startingConfig.tLogPolicy;
|
||||
g_simulator.tLogWriteAntiQuorum = startingConfig.tLogWriteAntiQuorum;
|
||||
g_simulator.storagePolicy = simconfig.db.storagePolicy;
|
||||
g_simulator.tLogPolicy = simconfig.db.tLogPolicy;
|
||||
g_simulator.tLogWriteAntiQuorum = simconfig.db.tLogWriteAntiQuorum;
|
||||
ASSERT(g_simulator.storagePolicy);
|
||||
ASSERT(g_simulator.tLogPolicy);
|
||||
TraceEvent("simulatorConfig").detail("tLogPolicy", g_simulator.tLogPolicy->info()).detail("storagePolicy", g_simulator.storagePolicy->info()).detail("tLogWriteAntiQuorum", g_simulator.tLogWriteAntiQuorum).detail("ConfigString", startingConfigString);
|
||||
|
||||
int machineCount = g_random->randomInt( std::max( 2+dataCenters, startingConfig.minMachinesRequired() ), extraDB ? 6 : 10 );
|
||||
const int dataCenters = simconfig.datacenters;
|
||||
const int machineCount = simconfig.machine_count;
|
||||
const int coordinatorCount = simconfig.coordinators;
|
||||
const int processesPerMachine = simconfig.processes_per_machine;
|
||||
|
||||
// half the time, when we have more than 4 machines that are not the first in their dataCenter, assign classes
|
||||
bool assignClasses = machineCount - dataCenters > 4 && g_random->random01() < 0.5;
|
||||
int processesPerMachine = g_random->randomInt(1, (extraDB ? 14 : 28)/machineCount + 2 );
|
||||
|
||||
// Use SSL half the time
|
||||
bool sslEnabled = g_random->random01() < 0.05;
|
||||
TEST( sslEnabled ); // SSL enabled
|
||||
TEST( !sslEnabled ); // SSL disabled
|
||||
|
||||
// Pick coordination processes.
|
||||
int coordinatorCount = BUGGIFY ? g_random->randomInt(1, machineCount+1) : std::min( machineCount, startingConfig.maxMachineFailuresTolerated()*2 + 1 );
|
||||
|
||||
vector<NetworkAddress> coordinatorAddresses;
|
||||
for( int dc = 0; dc < dataCenters; dc++ ) {
|
||||
int machines = machineCount / dataCenters + (dc < machineCount % dataCenters); // add remainder of machines to first datacenter
|
||||
|
@ -817,12 +893,12 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
|
|||
}
|
||||
|
||||
g_simulator.desiredCoordinators = coordinatorCount;
|
||||
g_simulator.killableMachines = startingConfig.maxMachineFailuresTolerated();
|
||||
g_simulator.killableMachines = simconfig.db.maxMachineFailuresTolerated();
|
||||
g_simulator.neededDatacenters = 1;
|
||||
g_simulator.killableDatacenters = 0;
|
||||
g_simulator.physicalDatacenters = dataCenters;
|
||||
g_simulator.maxCoordinatorsInDatacenter = ((coordinatorCount-1)/dataCenters) + 1;
|
||||
g_simulator.machinesNeededForProgress = startingConfig.minMachinesRequired() + nonVersatileMachines;
|
||||
g_simulator.machinesNeededForProgress = simconfig.db.minMachinesRequired() + nonVersatileMachines;
|
||||
g_simulator.processesPerMachine = processesPerMachine;
|
||||
|
||||
TraceEvent("SetupSimulatorSettings")
|
||||
|
|
|
@ -947,6 +947,7 @@ ACTOR static Future<double> doGrvProbe(Transaction *tr, Optional<FDBTransactionO
|
|||
|
||||
loop {
|
||||
try {
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
if(priority.present()) {
|
||||
tr->setOption(priority.get());
|
||||
}
|
||||
|
@ -969,6 +970,7 @@ ACTOR static Future<double> doReadProbe(Future<double> grvProbe, Transaction *tr
|
|||
state double start = timer_monotonic();
|
||||
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
try {
|
||||
Optional<Standalone<StringRef> > _ = wait(tr->get(LiteralStringRef("\xff/StatusJsonTestKey62793")));
|
||||
return timer_monotonic() - start;
|
||||
|
@ -993,6 +995,7 @@ ACTOR static Future<double> doCommitProbe(Future<double> grvProbe, Transaction *
|
|||
|
||||
loop {
|
||||
try {
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
||||
tr->makeSelfConflicting();
|
||||
Void _ = wait(tr->commit());
|
||||
|
@ -1022,9 +1025,7 @@ ACTOR static Future<Void> doProbe(Future<double> probe, int timeoutSeconds, cons
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<StatusObject> latencyProbeFetcher(Reference<AsyncVar<struct ServerDBInfo>> db, StatusArray *messages, std::set<std::string> *incomplete_reasons) {
|
||||
Database cx = openDBOnServer(db, TaskDefaultEndpoint, true, true); // Open a new database connection that is lock-aware
|
||||
|
||||
ACTOR static Future<StatusObject> latencyProbeFetcher(Database cx, StatusArray *messages, std::set<std::string> *incomplete_reasons) {
|
||||
state Transaction trImmediate(cx);
|
||||
state Transaction trDefault(cx);
|
||||
state Transaction trBatch(cx);
|
||||
|
@ -1787,9 +1788,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
|
|||
|
||||
if (configuration.present()){
|
||||
// Do the latency probe by itself to avoid interference from other status activities
|
||||
state Future<StatusObject> latencyProbe = latencyProbeFetcher(db, &messages, &status_incomplete_reasons);
|
||||
|
||||
StatusObject latencyProbeResults = wait(latencyProbe);
|
||||
StatusObject latencyProbeResults = wait(latencyProbeFetcher(cx, &messages, &status_incomplete_reasons));
|
||||
|
||||
statusObj["database_available"] = latencyProbeResults.count("immediate_priority_transaction_start_seconds") && latencyProbeResults.count("read_seconds") && latencyProbeResults.count("commit_seconds");
|
||||
if (!latencyProbeResults.empty()) {
|
||||
|
|
|
@ -54,7 +54,7 @@ struct StorageMetricSample {
|
|||
bck_split.decrementNonEnd();
|
||||
|
||||
KeyRef split = keyBetween(KeyRangeRef(bck_split != sample.begin() ? std::max<KeyRef>(*bck_split,range.begin) : range.begin, *it));
|
||||
if( split.size() <= CLIENT_KNOBS->SPLIT_KEY_SIZE_LIMIT )
|
||||
if(!front || (getEstimate(KeyRangeRef(range.begin, split)) > 0 && split.size() <= CLIENT_KNOBS->SPLIT_KEY_SIZE_LIMIT))
|
||||
return split;
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ struct StorageMetricSample {
|
|||
++it;
|
||||
|
||||
KeyRef split = keyBetween(KeyRangeRef(*fwd_split, it != sample.end() ? std::min<KeyRef>(*it, range.end) : range.end));
|
||||
if( split.size() <= CLIENT_KNOBS->SPLIT_KEY_SIZE_LIMIT )
|
||||
if(front || (getEstimate(KeyRangeRef(split, range.end)) > 0 && split.size() <= CLIENT_KNOBS->SPLIT_KEY_SIZE_LIMIT))
|
||||
return split;
|
||||
|
||||
fwd_split = it;
|
||||
|
|
|
@ -23,11 +23,11 @@
|
|||
#include "flow/Stats.h"
|
||||
#include "flow/UnitTest.h"
|
||||
#include "fdbclient/NativeAPI.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "fdbclient/KeyRangeMap.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "WorkerInterface.h"
|
||||
#include "TLogInterface.h"
|
||||
#include "flow/Notified.h"
|
||||
#include "Knobs.h"
|
||||
#include "IKeyValueStore.h"
|
||||
#include "flow/ActorCollection.h"
|
||||
|
@ -215,13 +215,14 @@ struct TLogData : NonCopyable {
|
|||
Future<Void> updatePersist; //SOMEDAY: integrate the recovery and update storage so that only one of them is committing to persistant data.
|
||||
|
||||
PromiseStream<Future<Void>> sharedActors;
|
||||
bool terminated;
|
||||
|
||||
TLogData(UID dbgid, IKeyValueStore* persistentData, IDiskQueue * persistentQueue, Reference<AsyncVar<ServerDBInfo>> const& dbInfo)
|
||||
: dbgid(dbgid), instanceID(g_random->randomUniqueID().first()),
|
||||
persistentData(persistentData), rawPersistentQueue(persistentQueue), persistentQueue(new TLogQueue(persistentQueue, dbgid)),
|
||||
dbInfo(dbInfo), queueCommitBegin(0), queueCommitEnd(0), prevVersion(0),
|
||||
diskQueueCommitBytes(0), largeDiskQueueCommitBytes(false),
|
||||
bytesInput(0), bytesDurable(0), updatePersist(Void())
|
||||
bytesInput(0), bytesDurable(0), updatePersist(Void()), terminated(false)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
@ -315,6 +316,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
TLogInterface tli;
|
||||
PromiseStream<Future<Void>> addActor;
|
||||
TLogData* tLogData;
|
||||
Future<Void> recovery;
|
||||
|
||||
Reference<AsyncVar<Reference<ILogSystem>>> logSystem;
|
||||
Optional<Tag> remoteTag;
|
||||
|
@ -323,7 +325,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
explicit LogData(TLogData* tLogData, TLogInterface interf, Optional<Tag> remoteTag, int persistentDataFormat = 1) : tLogData(tLogData), knownCommittedVersion(0), tli(interf), logId(interf.id()),
|
||||
cc("TLog", interf.id().toString()), bytesInput("bytesInput", cc), bytesDurable("bytesDurable", cc), remoteTag(remoteTag), persistentDataFormat(persistentDataFormat), logSystem(new AsyncVar<Reference<ILogSystem>>()),
|
||||
// These are initialized differently on init() or recovery
|
||||
recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0), newPersistentDataVersion(invalidVersion)
|
||||
recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0), newPersistentDataVersion(invalidVersion), recovery(Void())
|
||||
{
|
||||
startRole(interf.id(), UID(), "TLog");
|
||||
|
||||
|
@ -351,6 +353,16 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
|
||||
ASSERT(tLogData->bytesDurable <= tLogData->bytesInput);
|
||||
endRole(tli.id(), "TLog", "Error", true);
|
||||
|
||||
if(!tLogData->terminated) {
|
||||
Key logIdKey = BinaryWriter::toValue(logId,Unversioned());
|
||||
tLogData->persistentData->clear( singleKeyRange(logIdKey.withPrefix(persistCurrentVersionKeys.begin)) );
|
||||
tLogData->persistentData->clear( singleKeyRange(logIdKey.withPrefix(persistRecoveryCountKeys.begin)) );
|
||||
Key msgKey = logIdKey.withPrefix(persistTagMessagesKeys.begin);
|
||||
tLogData->persistentData->clear( KeyRangeRef( msgKey, strinc(msgKey) ) );
|
||||
Key poppedKey = logIdKey.withPrefix(persistTagPoppedKeys.begin);
|
||||
tLogData->persistentData->clear( KeyRangeRef( poppedKey, strinc(poppedKey) ) );
|
||||
}
|
||||
}
|
||||
|
||||
LogEpoch epoch() const { return recoveryCount; }
|
||||
|
@ -366,6 +378,7 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
|
|||
TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("isStopped", logData->stopped).detail("queueCommitted", logData->queueCommittedVersion.get());
|
||||
|
||||
logData->stopped = true;
|
||||
logData->recovery = Void();
|
||||
|
||||
// Lock once the current version has been committed
|
||||
Void _ = wait( logData->queueCommittedVersion.whenAtLeast( stopVersion ) );
|
||||
|
@ -589,9 +602,15 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
|
|||
} else {
|
||||
Void _ = wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
|
||||
}
|
||||
|
||||
if( logData->removed.isReady() ) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
self->queueOrder.pop_front();
|
||||
if(logData->persistentDataDurableVersion == logData->version.get()) {
|
||||
self->queueOrder.pop_front();
|
||||
}
|
||||
Void _ = wait( delay(0.0, TaskUpdateStorage) );
|
||||
} else {
|
||||
Void _ = wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
|
||||
|
@ -1168,11 +1187,11 @@ ACTOR Future<Void> respondToRecovered( TLogInterface tli, Future<Void> recovery
|
|||
|
||||
ACTOR Future<Void> cleanupPeekTrackers( TLogData* self ) {
|
||||
loop {
|
||||
double minExpireTime = SERVER_KNOBS->PEEK_TRACKER_EXPIRATION_TIME;
|
||||
double minTimeUntilExpiration = SERVER_KNOBS->PEEK_TRACKER_EXPIRATION_TIME;
|
||||
auto it = self->peekTracker.begin();
|
||||
while(it != self->peekTracker.end()) {
|
||||
double expireTime = SERVER_KNOBS->PEEK_TRACKER_EXPIRATION_TIME - now()-it->second.lastUpdate;
|
||||
if(expireTime < 1.0e-6) {
|
||||
double timeUntilExpiration = it->second.lastUpdate + SERVER_KNOBS->PEEK_TRACKER_EXPIRATION_TIME - now();
|
||||
if(timeUntilExpiration < 1.0e-6) {
|
||||
for(auto seq : it->second.sequence_version) {
|
||||
if(!seq.second.isSet()) {
|
||||
seq.second.sendError(timed_out());
|
||||
|
@ -1180,12 +1199,12 @@ ACTOR Future<Void> cleanupPeekTrackers( TLogData* self ) {
|
|||
}
|
||||
it = self->peekTracker.erase(it);
|
||||
} else {
|
||||
minExpireTime = std::min(minExpireTime, expireTime);
|
||||
minTimeUntilExpiration = std::min(minTimeUntilExpiration, timeUntilExpiration);
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
Void _ = wait( delay(minExpireTime) );
|
||||
Void _ = wait( delay(minTimeUntilExpiration) );
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1262,6 +1281,7 @@ ACTOR Future<Void> serveTLogInterface( TLogData* self, TLogInterface tli, Refere
|
|||
void removeLog( TLogData* self, Reference<LogData> logData ) {
|
||||
TraceEvent("TLogRemoved", logData->logId).detail("input", logData->bytesInput.getValue()).detail("durable", logData->bytesDurable.getValue());
|
||||
logData->stopped = true;
|
||||
logData->recovery = Void();
|
||||
|
||||
logData->addActor = PromiseStream<Future<Void>>(); //there could be items still in the promise stream if one of the actors threw an error immediately
|
||||
self->id_data.erase(logData->logId);
|
||||
|
@ -1388,7 +1408,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, Ta
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> tLogCore( TLogData* self, Reference<LogData> logData, Future<Void> recovery ) {
|
||||
ACTOR Future<Void> tLogCore( TLogData* self, Reference<LogData> logData ) {
|
||||
if(logData->removed.isReady()) {
|
||||
Void _ = wait(delay(0)); //to avoid iterator invalidation in restorePersistentState when removed is already ready
|
||||
ASSERT(logData->removed.isError());
|
||||
|
@ -1409,15 +1429,15 @@ ACTOR Future<Void> tLogCore( TLogData* self, Reference<LogData> logData, Future<
|
|||
state Future<Void> warningCollector = timeoutWarningCollector( warningCollectorInput.getFuture(), 1.0, "TLogQueueCommitSlow", self->dbgid );
|
||||
state Future<Void> error = actorCollection( logData->addActor.getFuture() );
|
||||
|
||||
if( recovery.isValid() && !recovery.isReady()) {
|
||||
logData->addActor.send( recovery );
|
||||
if( logData->recovery.isValid() && !logData->recovery.isReady()) {
|
||||
logData->addActor.send( logData->recovery );
|
||||
}
|
||||
|
||||
logData->addActor.send( waitFailureServer(logData->tli.waitFailure.getFuture()) );
|
||||
logData->addActor.send( respondToRecovered(logData->tli, recovery) );
|
||||
logData->addActor.send( respondToRecovered(logData->tli, logData->recovery) );
|
||||
logData->addActor.send( logData->removed );
|
||||
//FIXME: update tlogMetrics to include new information, or possibly only have one copy for the shared instance
|
||||
logData->addActor.send( traceCounters("TLogMetrics", logData->logId, SERVER_KNOBS->STORAGE_LOGGING_DELAY, &logData->cc, self->dbgid.toString() + "/TLogMetrics"));
|
||||
logData->addActor.send( traceCounters("TLogMetrics", logData->logId, SERVER_KNOBS->STORAGE_LOGGING_DELAY, &logData->cc, logData->logId.toString() + "/TLogMetrics"));
|
||||
logData->addActor.send( serveTLogInterface(self, logData->tli, logData, warningCollectorInput) );
|
||||
|
||||
if(logData->remoteTag.present()) {
|
||||
|
@ -1560,8 +1580,13 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
|
|||
when( TLogQueueEntry qe = wait( self->persistentQueue->readNext() ) ) {
|
||||
if(!self->queueOrder.size() || self->queueOrder.back() != qe.id) self->queueOrder.push_back(qe.id);
|
||||
if(qe.id != lastId) {
|
||||
logData = self->id_data[qe.id];
|
||||
lastId = qe.id;
|
||||
auto it = self->id_data.find(qe.id);
|
||||
if(it != self->id_data.end()) {
|
||||
logData = it->second;
|
||||
} else {
|
||||
logData = Reference<LogData>();
|
||||
}
|
||||
} else {
|
||||
ASSERT( qe.version >= lastVer );
|
||||
lastVer = qe.version;
|
||||
|
@ -1570,19 +1595,21 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
|
|||
//TraceEvent("TLogRecoveredQE", self->dbgid).detail("logId", qe.id).detail("ver", qe.version).detail("MessageBytes", qe.messages.size()).detail("Tags", qe.tags.size())
|
||||
// .detail("Tag0", qe.tags.size() ? qe.tags[0].tag : invalidTag).detail("version", logData->version.get());
|
||||
|
||||
logData->knownCommittedVersion = std::max(logData->knownCommittedVersion, qe.knownCommittedVersion);
|
||||
if( qe.version > logData->version.get() ) {
|
||||
commitMessages(logData, qe.version, qe.arena(), qe.messages, qe.tags, self->bytesInput);
|
||||
logData->version.set( qe.version );
|
||||
logData->queueCommittedVersion.set( qe.version );
|
||||
if(logData) {
|
||||
logData->knownCommittedVersion = std::max(logData->knownCommittedVersion, qe.knownCommittedVersion);
|
||||
if( qe.version > logData->version.get() ) {
|
||||
commitMessages(logData, qe.version, qe.arena(), qe.messages, qe.tags, self->bytesInput);
|
||||
logData->version.set( qe.version );
|
||||
logData->queueCommittedVersion.set( qe.version );
|
||||
|
||||
while (self->bytesInput - self->bytesDurable >= recoverMemoryLimit) {
|
||||
TEST(true); // Flush excess data during TLog queue recovery
|
||||
TraceEvent("FlushLargeQueueDuringRecovery", self->dbgid).detail("BytesInput", self->bytesInput).detail("BytesDurable", self->bytesDurable).detail("Version", logData->version.get()).detail("PVer", logData->persistentDataVersion);
|
||||
while (self->bytesInput - self->bytesDurable >= recoverMemoryLimit) {
|
||||
TEST(true); // Flush excess data during TLog queue recovery
|
||||
TraceEvent("FlushLargeQueueDuringRecovery", self->dbgid).detail("BytesInput", self->bytesInput).detail("BytesDurable", self->bytesDurable).detail("Version", logData->version.get()).detail("PVer", logData->persistentDataVersion);
|
||||
|
||||
choose {
|
||||
when( Void _ = wait( updateStorage(self) ) ) {}
|
||||
when( Void _ = wait( allRemoved ) ) { throw worker_removed(); }
|
||||
choose {
|
||||
when( Void _ = wait( updateStorage(self) ) ) {}
|
||||
when( Void _ = wait( allRemoved ) ) { throw worker_removed(); }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1602,7 +1629,7 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
|
|||
TraceEvent("TLogZeroVersion", self->dbgid).detail("logId", it.first);
|
||||
it.second->queueCommittedVersion.set(it.second->version.get());
|
||||
}
|
||||
self->sharedActors.send( tLogCore( self, it.second, Void() ) );
|
||||
self->sharedActors.send( tLogCore( self, it.second ) );
|
||||
}
|
||||
|
||||
return Void();
|
||||
|
@ -1610,6 +1637,7 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
|
|||
|
||||
bool tlogTerminated( TLogData* self, IKeyValueStore* persistentData, TLogQueue* persistentQueue, Error const& e ) {
|
||||
// Dispose the IKVS (destroying its data permanently) only if this shutdown is definitely permanent. Otherwise just close it.
|
||||
self->terminated = true;
|
||||
if (e.code() == error_code_worker_removed || e.code() == error_code_recruitment_failed) {
|
||||
persistentData->dispose();
|
||||
persistentQueue->dispose();
|
||||
|
@ -1635,7 +1663,7 @@ ACTOR Future<Void> recoverTagFromLogSystem( TLogData* self, Reference<LogData> l
|
|||
state Version tagPopped = 0;
|
||||
state Version lastVer = 0;
|
||||
|
||||
TraceEvent("LogRecoveringTagBegin", self->dbgid).detail("Tag", tag).detail("recoverAt", endVersion);
|
||||
TraceEvent("LogRecoveringTagBegin", logData->logId).detail("Tag", tag).detail("recoverAt", endVersion);
|
||||
|
||||
while (tagAt <= endVersion) {
|
||||
loop {
|
||||
|
@ -1654,18 +1682,18 @@ ACTOR Future<Void> recoverTagFromLogSystem( TLogData* self, Reference<LogData> l
|
|||
}
|
||||
}
|
||||
|
||||
TraceEvent("LogRecoveringTagResults", logData->logId).detail("Tag", tag);
|
||||
//TraceEvent("LogRecoveringTagResults", logData->logId).detail("Tag", tag);
|
||||
|
||||
Version ver = 0;
|
||||
BinaryWriter wr( Unversioned() );
|
||||
int writtenBytes = 0;
|
||||
while (true) {
|
||||
bool foundMessage = r->hasMessage();
|
||||
//TraceEvent("LogRecoveringMsg").detail("Tag", tag).detail("foundMessage", foundMessage).detail("ver", r->version().toString());
|
||||
//TraceEvent("LogRecoveringMsg", logData->logId).detail("Tag", tag).detail("foundMessage", foundMessage).detail("ver", r->version().toString());
|
||||
if (!foundMessage || r->version().version != ver) {
|
||||
ASSERT(r->version().version > lastVer);
|
||||
if (ver) {
|
||||
//TraceEvent("LogRecoveringTagVersion", self->dbgid).detail("Tag", tag).detail("Ver", ver).detail("Bytes", wr.getLength());
|
||||
//TraceEvent("LogRecoveringTagVersion", logData->logId).detail("Tag", tag).detail("Ver", ver).detail("Bytes", wr.getLength());
|
||||
writtenBytes += 100 + wr.getLength();
|
||||
self->persistentData->set( KeyValueRef( persistTagMessagesKey( logData->logId, tag, ver ), wr.toStringRef() ) );
|
||||
}
|
||||
|
@ -1706,6 +1734,8 @@ ACTOR Future<Void> recoverTagFromLogSystem( TLogData* self, Reference<LogData> l
|
|||
Void _ = wait(tLogPop( self, TLogPopRequest(tagPopped, tag), logData ));
|
||||
|
||||
updatePersistentPopped( self, logData, tag, logData->tag_data.find(tag)->value );
|
||||
|
||||
TraceEvent("LogRecoveringTagComplete", logData->logId).detail("Tag", tag).detail("recoverAt", endVersion);
|
||||
return Void();
|
||||
}
|
||||
|
||||
|
@ -1747,56 +1777,62 @@ ACTOR Future<Void> recoverFromLogSystem( TLogData* self, Reference<LogData> logD
|
|||
state Future<Void> recoveryDone = Never();
|
||||
state Future<Void> commitTimeout = delay(SERVER_KNOBS->LONG_TLOG_COMMIT_TIME);
|
||||
|
||||
loop {
|
||||
choose {
|
||||
when(Void _ = wait(copyDone)) {
|
||||
recoverFutures.clear();
|
||||
for(auto tag : recoverTags )
|
||||
recoverFutures.push_back(recoverTagFromLogSystem(self, logData, 0, knownCommittedVersion, tag, uncommittedBytes, logSystem));
|
||||
copyDone = Never();
|
||||
recoveryDone = waitForAll(recoverFutures);
|
||||
try {
|
||||
loop {
|
||||
choose {
|
||||
when(Void _ = wait(copyDone)) {
|
||||
recoverFutures.clear();
|
||||
for(auto tag : recoverTags )
|
||||
recoverFutures.push_back(recoverTagFromLogSystem(self, logData, 0, knownCommittedVersion, tag, uncommittedBytes, logSystem));
|
||||
copyDone = Never();
|
||||
recoveryDone = waitForAll(recoverFutures);
|
||||
|
||||
Void __ = wait( committing );
|
||||
Void __ = wait( self->updatePersist );
|
||||
committing = self->persistentData->commit();
|
||||
commitTimeout = delay(SERVER_KNOBS->LONG_TLOG_COMMIT_TIME);
|
||||
uncommittedBytes->set(0);
|
||||
Void __ = wait( committing );
|
||||
TraceEvent("TLogCommitCopyData", self->dbgid);
|
||||
Void __ = wait( committing );
|
||||
Void __ = wait( self->updatePersist );
|
||||
committing = self->persistentData->commit();
|
||||
commitTimeout = delay(SERVER_KNOBS->LONG_TLOG_COMMIT_TIME);
|
||||
uncommittedBytes->set(0);
|
||||
Void __ = wait( committing );
|
||||
TraceEvent("TLogCommitCopyData", logData->logId);
|
||||
|
||||
if(!copyComplete.isSet())
|
||||
copyComplete.send(Void());
|
||||
}
|
||||
when(Void _ = wait(recoveryDone)) { break; }
|
||||
when(Void _ = wait(commitTimeout)) {
|
||||
TEST(true); // We need to commit occasionally if this process is long to avoid running out of memory.
|
||||
// We let one, but not more, commits pipeline with the network transfer
|
||||
Void __ = wait( committing );
|
||||
Void __ = wait( self->updatePersist );
|
||||
committing = self->persistentData->commit();
|
||||
commitTimeout = delay(SERVER_KNOBS->LONG_TLOG_COMMIT_TIME);
|
||||
uncommittedBytes->set(0);
|
||||
TraceEvent("TLogCommitRecoveryData", self->dbgid).detail("MemoryUsage", DEBUG_DETERMINISM ? 0 : getMemoryUsage());
|
||||
}
|
||||
when(Void _ = wait(uncommittedBytes->onChange())) {
|
||||
if(uncommittedBytes->get() >= SERVER_KNOBS->LARGE_TLOG_COMMIT_BYTES)
|
||||
commitTimeout = Void();
|
||||
if(!copyComplete.isSet())
|
||||
copyComplete.send(Void());
|
||||
}
|
||||
when(Void _ = wait(recoveryDone)) { break; }
|
||||
when(Void _ = wait(commitTimeout)) {
|
||||
TEST(true); // We need to commit occasionally if this process is long to avoid running out of memory.
|
||||
// We let one, but not more, commits pipeline with the network transfer
|
||||
Void __ = wait( committing );
|
||||
Void __ = wait( self->updatePersist );
|
||||
committing = self->persistentData->commit();
|
||||
commitTimeout = delay(SERVER_KNOBS->LONG_TLOG_COMMIT_TIME);
|
||||
uncommittedBytes->set(0);
|
||||
//TraceEvent("TLogCommitRecoveryData", self->dbgid).detail("MemoryUsage", DEBUG_DETERMINISM ? 0 : getMemoryUsage());
|
||||
}
|
||||
when(Void _ = wait(uncommittedBytes->onChange())) {
|
||||
if(uncommittedBytes->get() >= SERVER_KNOBS->LARGE_TLOG_COMMIT_BYTES)
|
||||
commitTimeout = Void();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Void _ = wait( committing );
|
||||
Void _ = wait( self->updatePersist );
|
||||
Void _ = wait( self->persistentData->commit() );
|
||||
|
||||
TraceEvent("TLogRecoveryComplete", logData->logId).detail("Locality", self->dbInfo->get().myLocality.toString());
|
||||
TEST(true); // tLog restore from old log system completed
|
||||
|
||||
return Void();
|
||||
} catch( Error &e ) {
|
||||
TraceEvent("TLogRecoveryError", logData->logId).error(e,true);
|
||||
if(!copyComplete.isSet())
|
||||
copyComplete.sendError(worker_removed());
|
||||
throw;
|
||||
}
|
||||
|
||||
Void _ = wait( committing );
|
||||
Void _ = wait( self->updatePersist );
|
||||
Void _ = wait( self->persistentData->commit() );
|
||||
|
||||
TraceEvent("TLogRecoveryComplete", self->dbgid).detail("Locality", self->dbInfo->get().myLocality.toString());
|
||||
TEST(true); // tLog restore from old log system completed
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, LocalityData locality ) {
|
||||
state Future<Void> recovery = Void();
|
||||
state TLogInterface recruited;
|
||||
recruited.locality = locality;
|
||||
recruited.initEndpoints();
|
||||
|
@ -1816,6 +1852,7 @@ ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, Localit
|
|||
}
|
||||
}
|
||||
it.second->stopped = true;
|
||||
it.second->recovery = Void();
|
||||
}
|
||||
|
||||
state Reference<LogData> logData = Reference<LogData>( new LogData(self, recruited, req.remoteTag) );
|
||||
|
@ -1843,8 +1880,9 @@ ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, Localit
|
|||
|
||||
state Promise<Void> copyComplete;
|
||||
TraceEvent("TLogRecover", self->dbgid).detail("logId", logData->logId).detail("at", req.recoverAt).detail("known", req.knownCommittedVersion).detail("tags", describe(req.recoverTags));
|
||||
recovery = recoverFromLogSystem( self, logData, req.recoverFrom, req.recoverAt, req.knownCommittedVersion, req.recoverTags, copyComplete );
|
||||
Void _ = wait(recovery); //FIXME
|
||||
|
||||
logData->recovery = recoverFromLogSystem( self, logData, req.recoverFrom, req.recoverAt, req.knownCommittedVersion, req.recoverTags, copyComplete );
|
||||
Void _ = wait(logData->recovery); //FIXME
|
||||
Void _ = wait(copyComplete.getFuture() || logData->removed );
|
||||
} else {
|
||||
// Brand new tlog, initialization has already been done by caller
|
||||
|
@ -1869,7 +1907,7 @@ ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, Localit
|
|||
|
||||
req.reply.send( recruited );
|
||||
|
||||
Void _ = wait( tLogCore( self, logData, recovery ) );
|
||||
Void _ = wait( tLogCore( self, logData ) );
|
||||
return Void();
|
||||
}
|
||||
|
||||
|
@ -1906,11 +1944,13 @@ ACTOR Future<Void> tLog( IKeyValueStore* persistentData, IDiskQueue* persistentQ
|
|||
}
|
||||
}
|
||||
} catch (Error& e) {
|
||||
TraceEvent("TLogError", tlogId).error(e);
|
||||
if(e.code() != error_code_actor_cancelled) {
|
||||
while(!tlogRequests.isEmpty()) {
|
||||
tlogRequests.getFuture().pop().reply.sendError(e);
|
||||
}
|
||||
TraceEvent("TLogError", tlogId).error(e, true);
|
||||
while(!tlogRequests.isEmpty()) {
|
||||
tlogRequests.getFuture().pop().reply.sendError(recruitment_failed());
|
||||
}
|
||||
|
||||
for( auto& it : self.id_data ) {
|
||||
it.second->recovery = Void();
|
||||
}
|
||||
|
||||
if (tlogTerminated( &self, persistentData, self.persistentQueue, e )) {
|
||||
|
|
|
@ -83,11 +83,11 @@ struct TesterInterface {
|
|||
}
|
||||
};
|
||||
|
||||
Future<Void> testerServerCore( TesterInterface const& interf, Reference<ClusterConnectionFile> const& ccf, Reference<AsyncVar<struct ServerDBInfo>> const& );
|
||||
Future<Void> testerServerCore( TesterInterface const& interf, Reference<ClusterConnectionFile> const& ccf, Reference<AsyncVar<struct ServerDBInfo>> const&, LocalityData const& );
|
||||
|
||||
enum test_location_t { TEST_HERE, TEST_ON_SERVERS, TEST_ON_TESTERS };
|
||||
enum test_type_t { TEST_TYPE_FROM_FILE, TEST_TYPE_CONSISTENCY_CHECK };
|
||||
|
||||
Future<Void> runTests( Reference<ClusterConnectionFile> const& connFile, test_type_t const& whatToRun, test_location_t const& whereToRun, int const& minTestersExpected, std::string const& fileName = std::string(), StringRef const& startingConfiguration = StringRef() );
|
||||
Future<Void> runTests( Reference<ClusterConnectionFile> const& connFile, test_type_t const& whatToRun, test_location_t const& whereToRun, int const& minTestersExpected, std::string const& fileName = std::string(), StringRef const& startingConfiguration = StringRef(), LocalityData const& locality = LocalityData() );
|
||||
|
||||
#endif
|
||||
|
|
|
@ -309,7 +309,6 @@ Future<Void> debugQueryServer( DebugQueryRequest const& req );
|
|||
Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> const& ccInterface, Reference<ClusterConnectionFile> const&, LocalityData const&, Reference<AsyncVar<ServerDBInfo>> const& dbInfo );
|
||||
Future<Void> resolver( ResolverInterface const& proxy, InitializeResolverRequest const&, Reference<AsyncVar<ServerDBInfo>> const& db );
|
||||
Future<Void> logRouter( TLogInterface const& interf, InitializeLogRouterRequest const& req, Reference<AsyncVar<ServerDBInfo>> const& db );
|
||||
Future<Void> runMetrics( Future<Database> const& fcx, Key const& metricsPrefix );
|
||||
|
||||
void registerThreadForProfiling();
|
||||
void updateCpuProfiler(ProfilerRequest req);
|
||||
|
|
|
@ -1528,6 +1528,17 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
Future<Optional<Void>> f;
|
||||
|
||||
Standalone<StringRef> machineId(getSharedMemoryMachineId().toString());
|
||||
|
||||
if (!localities.isPresent(LocalityData::keyZoneId))
|
||||
localities.set(LocalityData::keyZoneId, zoneId.present() ? zoneId : machineId);
|
||||
|
||||
if (!localities.isPresent(LocalityData::keyMachineId))
|
||||
localities.set(LocalityData::keyMachineId, machineId);
|
||||
|
||||
if (!localities.isPresent(LocalityData::keyDcId) && dcId.present())
|
||||
localities.set(LocalityData::keyDcId, dcId);
|
||||
|
||||
if (role == Simulation) {
|
||||
TraceEvent("Simulation").detail("TestFile", testFile);
|
||||
|
||||
|
@ -1574,16 +1585,6 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
vector<Future<Void>> actors;
|
||||
actors.push_back( listenError );
|
||||
Standalone<StringRef> machineId(getSharedMemoryMachineId().toString());
|
||||
|
||||
if (!localities.isPresent(LocalityData::keyZoneId))
|
||||
localities.set(LocalityData::keyZoneId, zoneId.present() ? zoneId : machineId);
|
||||
|
||||
if (!localities.isPresent(LocalityData::keyMachineId))
|
||||
localities.set(LocalityData::keyMachineId, machineId);
|
||||
|
||||
if (!localities.isPresent(LocalityData::keyDcId) && dcId.present())
|
||||
localities.set(LocalityData::keyDcId, dcId);
|
||||
|
||||
actors.push_back( fdbd(connectionFile, localities, processClass, dataFolder, dataFolder, storageMemLimit, metricsConnFile, metricsPrefix) );
|
||||
//actors.push_back( recurring( []{}, .001 ) ); // for ASIO latency measurement
|
||||
|
@ -1591,11 +1592,11 @@ int main(int argc, char* argv[]) {
|
|||
f = stopAfter( waitForAll(actors) );
|
||||
g_network->run();
|
||||
} else if (role == MultiTester) {
|
||||
f = stopAfter( runTests( connectionFile, TEST_TYPE_FROM_FILE, testOnServers ? TEST_ON_SERVERS : TEST_ON_TESTERS, minTesterCount, testFile ) );
|
||||
f = stopAfter( runTests( connectionFile, TEST_TYPE_FROM_FILE, testOnServers ? TEST_ON_SERVERS : TEST_ON_TESTERS, minTesterCount, testFile, StringRef(), localities ) );
|
||||
g_network->run();
|
||||
} else if (role == Test || role == ConsistencyCheck) {
|
||||
auto m = startSystemMonitor(dataFolder, zoneId, zoneId);
|
||||
f = stopAfter( runTests( connectionFile, role == ConsistencyCheck ? TEST_TYPE_CONSISTENCY_CHECK : TEST_TYPE_FROM_FILE, TEST_HERE, 1, testFile ) );
|
||||
f = stopAfter( runTests( connectionFile, role == ConsistencyCheck ? TEST_TYPE_CONSISTENCY_CHECK : TEST_TYPE_FROM_FILE, TEST_HERE, 1, testFile, StringRef(), localities ) );
|
||||
g_network->run();
|
||||
} else if (role == CreateTemplateDatabase) {
|
||||
createTemplateDatabase();
|
||||
|
|
|
@ -48,7 +48,6 @@
|
|||
<ClCompile Include="Knobs.cpp" />
|
||||
<ActorCompiler Include="QuietDatabase.actor.cpp" />
|
||||
<ActorCompiler Include="networktest.actor.cpp" />
|
||||
<ActorCompiler Include="MetricLogger.actor.cpp" />
|
||||
<ActorCompiler Include="workloads\SaveAndKill.actor.cpp" />
|
||||
<ActorCompiler Include="Resolver.actor.cpp" />
|
||||
<ActorCompiler Include="LogSystemDiskQueueAdapter.actor.cpp" />
|
||||
|
|
|
@ -24,9 +24,9 @@
|
|||
#include "flow/Trace.h"
|
||||
#include "fdbrpc/FailureMonitor.h"
|
||||
#include "fdbclient/NativeAPI.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "ConflictSet.h"
|
||||
#include "flow/Notified.h"
|
||||
#include "DataDistribution.h"
|
||||
#include "Knobs.h"
|
||||
#include <iterator>
|
||||
|
@ -495,7 +495,7 @@ ACTOR Future<Standalone<CommitTransactionRef>> provisionalMaster( Reference<Mast
|
|||
}
|
||||
}
|
||||
}
|
||||
when ( ReplyPromise<vector<StorageServerInterface>> req = waitNext( parent->provisionalProxies[0].getKeyServersLocations.getFuture() ) ) {
|
||||
when ( ReplyPromise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>> req = waitNext( parent->provisionalProxies[0].getKeyServersLocations.getFuture() ) ) {
|
||||
req.send(Never());
|
||||
}
|
||||
when ( Void _ = wait( waitFailure ) ) { throw worker_removed(); }
|
||||
|
|
|
@ -29,12 +29,12 @@
|
|||
#include "fdbclient/KeyRangeMap.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "fdbclient/NativeAPI.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "fdbclient/MasterProxyInterface.h"
|
||||
#include "fdbclient/DatabaseContext.h"
|
||||
#include "WorkerInterface.h"
|
||||
#include "TLogInterface.h"
|
||||
#include "MoveKeys.h"
|
||||
#include "flow/Notified.h"
|
||||
#include "Knobs.h"
|
||||
#include "WaitFailure.h"
|
||||
#include "IKeyValueStore.h"
|
||||
|
|
|
@ -484,7 +484,7 @@ ACTOR Future<Void> runWorkloadAsync( Database cx, WorkloadInterface workIface, T
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> testerServerWorkload( WorkloadRequest work, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<struct ServerDBInfo>> dbInfo ) {
|
||||
ACTOR Future<Void> testerServerWorkload( WorkloadRequest work, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<struct ServerDBInfo>> dbInfo, LocalityData locality ) {
|
||||
state WorkloadInterface workIface;
|
||||
state bool replied = false;
|
||||
state Database cx;
|
||||
|
@ -501,7 +501,7 @@ ACTOR Future<Void> testerServerWorkload( WorkloadRequest work, Reference<Cluster
|
|||
|
||||
if( database.size() ) {
|
||||
Reference<Cluster> cluster = Cluster::createCluster(ccf->getFilename(), -1);
|
||||
Database _cx = wait(cluster->createDatabase(database));
|
||||
Database _cx = wait(cluster->createDatabase(database, locality));
|
||||
cx = _cx;
|
||||
|
||||
Void _ = wait( delay(1.0) );
|
||||
|
@ -544,7 +544,7 @@ ACTOR Future<Void> testerServerWorkload( WorkloadRequest work, Reference<Cluster
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> testerServerCore( TesterInterface interf, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<struct ServerDBInfo>> dbInfo ) {
|
||||
ACTOR Future<Void> testerServerCore( TesterInterface interf, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<struct ServerDBInfo>> dbInfo, LocalityData locality ) {
|
||||
state PromiseStream<Future<Void>> addWorkload;
|
||||
state Future<Void> workerFatalError = actorCollection(addWorkload.getFuture());
|
||||
|
||||
|
@ -552,7 +552,7 @@ ACTOR Future<Void> testerServerCore( TesterInterface interf, Reference<ClusterCo
|
|||
loop choose {
|
||||
when (Void _ = wait(workerFatalError)) {}
|
||||
when (WorkloadRequest work = waitNext( interf.recruitments.getFuture() )) {
|
||||
addWorkload.send(testerServerWorkload(work, ccf, dbInfo));
|
||||
addWorkload.send(testerServerWorkload(work, ccf, dbInfo, locality));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -985,7 +985,7 @@ vector<TestSpec> readTests( ifstream& ifs ) {
|
|||
return result;
|
||||
}
|
||||
|
||||
ACTOR Future<Void> runTests( Reference<AsyncVar<Optional<struct ClusterControllerFullInterface>>> cc, Reference<AsyncVar<Optional<struct ClusterInterface>>> ci, vector< TesterInterface > testers, vector<TestSpec> tests, StringRef startingConfiguration ) {
|
||||
ACTOR Future<Void> runTests( Reference<AsyncVar<Optional<struct ClusterControllerFullInterface>>> cc, Reference<AsyncVar<Optional<struct ClusterInterface>>> ci, vector< TesterInterface > testers, vector<TestSpec> tests, StringRef startingConfiguration, LocalityData locality ) {
|
||||
state Standalone<StringRef> database = LiteralStringRef("DB");
|
||||
state Database cx;
|
||||
state Reference<AsyncVar<ServerDBInfo>> dbInfo( new AsyncVar<ServerDBInfo> );
|
||||
|
@ -1016,7 +1016,7 @@ ACTOR Future<Void> runTests( Reference<AsyncVar<Optional<struct ClusterControlle
|
|||
databasePingDelay = 0.0;
|
||||
|
||||
if (useDB) {
|
||||
Database _cx = wait( DatabaseContext::createDatabase( ci, Reference<Cluster>(), database, LocalityData() ) ); // FIXME: Locality!
|
||||
Database _cx = wait( DatabaseContext::createDatabase( ci, Reference<Cluster>(), database, locality ) );
|
||||
cx = _cx;
|
||||
} else
|
||||
database = LiteralStringRef("");
|
||||
|
@ -1071,7 +1071,7 @@ ACTOR Future<Void> runTests( Reference<AsyncVar<Optional<struct ClusterControlle
|
|||
|
||||
ACTOR Future<Void> runTests( Reference<AsyncVar<Optional<struct ClusterControllerFullInterface>>> cc,
|
||||
Reference<AsyncVar<Optional<struct ClusterInterface>>> ci, vector<TestSpec> tests, test_location_t at,
|
||||
int minTestersExpected, StringRef startingConfiguration ) {
|
||||
int minTestersExpected, StringRef startingConfiguration, LocalityData locality ) {
|
||||
state int flags = at == TEST_ON_SERVERS ? 0 : GetWorkersRequest::FLAG_TESTER_CLASS;
|
||||
state Future<Void> testerTimeout = delay(60.0); // wait 60 sec for testers to show up
|
||||
state vector<std::pair<WorkerInterface, ProcessClass>> workers;
|
||||
|
@ -1097,12 +1097,12 @@ ACTOR Future<Void> runTests( Reference<AsyncVar<Optional<struct ClusterControlle
|
|||
for(int i=0; i<workers.size(); i++)
|
||||
ts.push_back(workers[i].first.testerInterface);
|
||||
|
||||
Void _ = wait( runTests( cc, ci, ts, tests, startingConfiguration) );
|
||||
Void _ = wait( runTests( cc, ci, ts, tests, startingConfiguration, locality) );
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> runTests( Reference<ClusterConnectionFile> connFile, test_type_t whatToRun, test_location_t at,
|
||||
int minTestersExpected, std::string fileName, StringRef startingConfiguration ) {
|
||||
int minTestersExpected, std::string fileName, StringRef startingConfiguration, LocalityData locality ) {
|
||||
state vector<TestSpec> testSpecs;
|
||||
Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> cc( new AsyncVar<Optional<ClusterControllerFullInterface>> );
|
||||
Reference<AsyncVar<Optional<ClusterInterface>>> ci( new AsyncVar<Optional<ClusterInterface>> );
|
||||
|
@ -1147,10 +1147,10 @@ ACTOR Future<Void> runTests( Reference<ClusterConnectionFile> connFile, test_typ
|
|||
Reference<AsyncVar<ServerDBInfo>> db( new AsyncVar<ServerDBInfo> );
|
||||
vector<TesterInterface> iTesters(1);
|
||||
actors.push_back( reportErrors(monitorServerDBInfo( cc, Reference<ClusterConnectionFile>(), LocalityData(), db ), "monitorServerDBInfo") ); // FIXME: Locality
|
||||
actors.push_back( reportErrors(testerServerCore( iTesters[0], connFile, db ), "testerServerCore") );
|
||||
tests = runTests( cc, ci, iTesters, testSpecs, startingConfiguration );
|
||||
actors.push_back( reportErrors(testerServerCore( iTesters[0], connFile, db, locality ), "testerServerCore") );
|
||||
tests = runTests( cc, ci, iTesters, testSpecs, startingConfiguration, locality );
|
||||
} else {
|
||||
tests = reportErrors(runTests(cc, ci, testSpecs, at, minTestersExpected, startingConfiguration), "runTests");
|
||||
tests = reportErrors(runTests(cc, ci, testSpecs, at, minTestersExpected, startingConfiguration, locality), "runTests");
|
||||
}
|
||||
|
||||
choose {
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include "flow/TDMetric.actor.h"
|
||||
#include "fdbrpc/simulator.h"
|
||||
#include "fdbclient/NativeAPI.h"
|
||||
#include "fdbclient/MetricLogger.h"
|
||||
#include "WorkerInterface.h"
|
||||
#include "IKeyValueStore.h"
|
||||
#include "WaitFailure.h"
|
||||
|
@ -184,7 +185,7 @@ std::string filenameFromSample( KeyValueStoreType storeType, std::string folder,
|
|||
if( storeType == KeyValueStoreType::SSD_BTREE_V1 )
|
||||
return joinPath( folder, sample_filename );
|
||||
else if ( storeType == KeyValueStoreType::SSD_BTREE_V2 )
|
||||
return joinPath(folder, sample_filename);
|
||||
return joinPath(folder, sample_filename);
|
||||
else if( storeType == KeyValueStoreType::MEMORY )
|
||||
return joinPath( folder, sample_filename.substr(0, sample_filename.size() - 5) );
|
||||
|
||||
|
@ -195,7 +196,7 @@ std::string filenameFromId( KeyValueStoreType storeType, std::string folder, std
|
|||
if( storeType == KeyValueStoreType::SSD_BTREE_V1)
|
||||
return joinPath( folder, prefix + id.toString() + ".fdb" );
|
||||
else if (storeType == KeyValueStoreType::SSD_BTREE_V2)
|
||||
return joinPath(folder, prefix + id.toString() + ".sqlite");
|
||||
return joinPath(folder, prefix + id.toString() + ".sqlite");
|
||||
else if( storeType == KeyValueStoreType::MEMORY )
|
||||
return joinPath( folder, prefix + id.toString() + "-" );
|
||||
|
||||
|
@ -355,6 +356,7 @@ void startRole(UID roleId, UID workerId, std::string as, std::map<std::string, s
|
|||
g_roles.insert({as, roleId.shortString()});
|
||||
StringMetricHandle(LiteralStringRef("Roles")) = roleString(g_roles, false);
|
||||
StringMetricHandle(LiteralStringRef("RolesWithIDs")) = roleString(g_roles, true);
|
||||
if (g_network->isSimulated()) g_simulator.addRole(g_network->getLocalAddress(), as);
|
||||
}
|
||||
|
||||
void endRole(UID id, std::string as, std::string reason, bool ok, Error e) {
|
||||
|
@ -386,6 +388,7 @@ void endRole(UID id, std::string as, std::string reason, bool ok, Error e) {
|
|||
g_roles.erase({as, id.shortString()});
|
||||
StringMetricHandle(LiteralStringRef("Roles")) = roleString(g_roles, false);
|
||||
StringMetricHandle(LiteralStringRef("RolesWithIDs")) = roleString(g_roles, true);
|
||||
if (g_network->isSimulated()) g_simulator.removeRole(g_network->getLocalAddress(), as);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface, Reference<ClusterConnectionFile> connFile, LocalityData locality, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
|
||||
|
@ -509,7 +512,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
|
|||
if( metricsConnFile.size() > 0) {
|
||||
try {
|
||||
state Reference<Cluster> cluster = Cluster::createCluster( metricsConnFile, Cluster::API_VERSION_LATEST );
|
||||
metricsLogger = runMetrics( cluster->createDatabase(LiteralStringRef("DB")), KeyRef(metricsPrefix) );
|
||||
metricsLogger = runMetrics( cluster->createDatabase(LiteralStringRef("DB"), locality), KeyRef(metricsPrefix) );
|
||||
} catch(Error &e) {
|
||||
TraceEvent(SevWarnAlways, "TDMetricsBadClusterFile").error(e).detail("ConnFile", metricsConnFile);
|
||||
}
|
||||
|
@ -523,7 +526,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
|
|||
errorForwarders.add( registrationClient( ccInterface, interf, processClass ) );
|
||||
errorForwarders.add( waitFailureServer( interf.waitFailure.getFuture() ) );
|
||||
errorForwarders.add( monitorServerDBInfo( ccInterface, connFile, locality, dbInfo ) );
|
||||
errorForwarders.add( testerServerCore( interf.testerInterface, connFile, dbInfo ) );
|
||||
errorForwarders.add( testerServerCore( interf.testerInterface, connFile, dbInfo, locality ) );
|
||||
|
||||
filesClosed.add(stopping.getFuture());
|
||||
|
||||
|
@ -620,7 +623,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
|
|||
Reference<IAsyncFile> checkFile = wait( IAsyncFileSystem::filesystem()->open( joinPath(folder, validationFilename), IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_READWRITE, 0600 ) );
|
||||
Void _ = wait( checkFile->sync() );
|
||||
}
|
||||
|
||||
|
||||
if(g_network->isSimulated()) {
|
||||
TraceEvent("SimulatedReboot").detail("Deletion", rebootReq.deleteData );
|
||||
if( rebootReq.deleteData ) {
|
||||
|
@ -659,7 +662,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
|
|||
std::map<std::string, std::string> details;
|
||||
details["ForMaster"] = req.recruitmentID.shortString();
|
||||
details["StorageEngine"] = req.storeType.toString();
|
||||
|
||||
|
||||
//FIXME: start role for every tlog instance, rather that just for the shared actor, also use a different role type for the shared actor
|
||||
startRole( logId, interf.id(), "SharedTLog", details );
|
||||
|
||||
|
|
|
@ -132,11 +132,12 @@ public:
|
|||
}
|
||||
|
||||
ACTOR Future<Void> performSetup(Database cx, ApiCorrectnessWorkload *self) {
|
||||
//Choose a random transaction type (NativeAPI, ReadYourWrites, ThreadSafe)
|
||||
//Choose a random transaction type (NativeAPI, ReadYourWrites, ThreadSafe, MultiVersion)
|
||||
std::vector<TransactionType> types;
|
||||
types.push_back(NATIVE);
|
||||
types.push_back(READ_YOUR_WRITES);
|
||||
types.push_back(THREAD_SAFE);
|
||||
types.push_back(MULTI_VERSION);
|
||||
|
||||
Void _ = wait(self->chooseTransactionFactory(cx, types));
|
||||
|
||||
|
|
|
@ -171,6 +171,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
state DatabaseConfiguration configuration;
|
||||
|
||||
state Transaction tr(cx);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
loop {
|
||||
try {
|
||||
Standalone<RangeResultRef> res = wait( tr.getRange(configKeys, 1000) );
|
||||
|
@ -247,15 +248,15 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
}
|
||||
|
||||
//Get a list of key servers; verify that the TLogs and master all agree about who the key servers are
|
||||
state Promise<vector<StorageServerInterface>> keyServerPromise;
|
||||
state Promise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>> keyServerPromise;
|
||||
bool keyServerResult = wait(self->getKeyServers(cx, self, keyServerPromise));
|
||||
if(keyServerResult)
|
||||
{
|
||||
state vector<StorageServerInterface> storageServers = keyServerPromise.getFuture().get();
|
||||
state vector<pair<KeyRangeRef, vector<StorageServerInterface>>> keyServers = keyServerPromise.getFuture().get();
|
||||
|
||||
//Get the locations of all the shards in the database
|
||||
state Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise;
|
||||
bool keyLocationResult = wait(self->getKeyLocations(cx, storageServers, self, keyLocationPromise));
|
||||
bool keyLocationResult = wait(self->getKeyLocations(cx, keyServers, self, keyLocationPromise));
|
||||
if(keyLocationResult)
|
||||
{
|
||||
state Standalone<VectorRef<KeyValueRef>> keyLocations = keyLocationPromise.getFuture().get();
|
||||
|
@ -268,7 +269,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
catch(Error &e)
|
||||
{
|
||||
if(e.code() == error_code_past_version || e.code() == error_code_future_version || e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed || e.code() == error_code_server_request_queue_full)
|
||||
TraceEvent("ConsistencyCheck_Retry").error(e);
|
||||
TraceEvent("ConsistencyCheck_Retry").error(e); // FIXME: consistency check does not retry in this case
|
||||
else
|
||||
self->testFailure(format("Error %d - %s", e.code(), e.what()));
|
||||
}
|
||||
|
@ -285,6 +286,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
loop
|
||||
{
|
||||
state Transaction tr(cx);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
try
|
||||
{
|
||||
Version version = wait(tr.getReadVersion());
|
||||
|
@ -300,18 +302,18 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
//Get a list of storage servers from the master and compares them with the TLogs.
|
||||
//If this is a quiescent check, then each master proxy needs to respond, otherwise only one needs to respond.
|
||||
//Returns false if there is a failure (in this case, keyServersPromise will never be set)
|
||||
ACTOR Future<bool> getKeyServers(Database cx, ConsistencyCheckWorkload *self, Promise<vector<StorageServerInterface>> keyServersPromise)
|
||||
ACTOR Future<bool> getKeyServers(Database cx, ConsistencyCheckWorkload *self, Promise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>> keyServersPromise)
|
||||
{
|
||||
state vector<StorageServerInterface> keyServers;
|
||||
state vector<pair<KeyRangeRef, vector<StorageServerInterface>>> keyServers;
|
||||
|
||||
loop
|
||||
{
|
||||
state Reference<ProxyInfo> proxyInfo = wait(cx->getMasterProxiesFuture());
|
||||
|
||||
//Try getting key server locations from the master proxies
|
||||
state vector<Future<ErrorOr<vector<StorageServerInterface>>>> keyServerLocationFutures;
|
||||
state vector<Future<ErrorOr<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>>>> keyServerLocationFutures;
|
||||
for(int i = 0; i < proxyInfo->size(); i++)
|
||||
keyServerLocationFutures.push_back(proxyInfo->get(i,&MasterProxyInterface::getKeyServersLocations).getReplyUnlessFailedFor(ReplyPromise<vector<StorageServerInterface>>(), 2, 0));
|
||||
keyServerLocationFutures.push_back(proxyInfo->get(i,&MasterProxyInterface::getKeyServersLocations).getReplyUnlessFailedFor(ReplyPromise<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>>(), 2, 0));
|
||||
|
||||
choose {
|
||||
when( Void _ = wait(waitForAll(keyServerLocationFutures)) ) {
|
||||
|
@ -320,21 +322,21 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
state bool successful = true;
|
||||
for(int i = 0; i < keyServerLocationFutures.size(); i++)
|
||||
{
|
||||
ErrorOr<vector<StorageServerInterface>> interfaces = keyServerLocationFutures[i].get();
|
||||
ErrorOr<vector<pair<KeyRangeRef, vector<StorageServerInterface>>>> shards = keyServerLocationFutures[i].get();
|
||||
|
||||
//If performing quiescent check, then all master proxies should be reachable. Otherwise, only one needs to be reachable
|
||||
if(self->performQuiescentChecks && !interfaces.present())
|
||||
if(self->performQuiescentChecks && !shards.present())
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_MasterProxyUnavailable").detail("MasterProxyID", proxyInfo->getId(i));
|
||||
self->testFailure("Master proxy unavailable");
|
||||
return false;
|
||||
}
|
||||
|
||||
//Get the list of interfaces if one was returned. If not doing a quiescent check, we can break if it is.
|
||||
//If we are doing a quiescent check, then we only need to do this for the first interface.
|
||||
if(interfaces.present() && (i == 0 || !self->performQuiescentChecks))
|
||||
//Get the list of shards if one was returned. If not doing a quiescent check, we can break if it is.
|
||||
//If we are doing a quiescent check, then we only need to do this for the first shard.
|
||||
if(shards.present() && (i == 0 || !self->performQuiescentChecks))
|
||||
{
|
||||
keyServers = interfaces.get();
|
||||
keyServers = shards.get();
|
||||
if(!self->performQuiescentChecks)
|
||||
break;
|
||||
}
|
||||
|
@ -364,96 +366,108 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
|
||||
//Retrieves the locations of all shards in the database
|
||||
//Returns false if there is a failure (in this case, keyLocationPromise will never be set)
|
||||
ACTOR Future<bool> getKeyLocations(Database cx, vector<StorageServerInterface> storageServers, ConsistencyCheckWorkload *self, Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise)
|
||||
ACTOR Future<bool> getKeyLocations(Database cx, vector<pair<KeyRangeRef, vector<StorageServerInterface>>> shards, ConsistencyCheckWorkload *self, Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise)
|
||||
{
|
||||
state Standalone<VectorRef<KeyValueRef>> keyLocations;
|
||||
state Key beginKey = allKeys.begin;
|
||||
state int i = 0;
|
||||
|
||||
//If the responses are too big, we may use multiple requests to get the key locations. Each request begins where the last left off
|
||||
while(beginKey < allKeys.end)
|
||||
for ( ; i < shards.size(); i++)
|
||||
{
|
||||
try
|
||||
// skip serverList shards
|
||||
if (!shards[i].first.begin.startsWith(keyServersPrefix)) {
|
||||
break;
|
||||
}
|
||||
|
||||
state Key endKey = shards[i].first.end.startsWith(keyServersPrefix) ? shards[i].first.end.removePrefix(keyServersPrefix) : allKeys.end;
|
||||
|
||||
while(beginKey < endKey)
|
||||
{
|
||||
Version version = wait(self->getVersion(cx, self));
|
||||
|
||||
GetKeyValuesRequest req;
|
||||
Key prefixBegin = beginKey.withPrefix(keyServersPrefix);
|
||||
req.begin = firstGreaterOrEqual(prefixBegin);
|
||||
req.end = firstGreaterOrEqual(keyServersEnd);
|
||||
req.limit = SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT;
|
||||
req.limitBytes = SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT_BYTES;
|
||||
req.version = version;
|
||||
|
||||
//Try getting the shard locations from the key servers
|
||||
state vector<Future<ErrorOr<GetKeyValuesReply>>> keyValueFutures;
|
||||
for(int i = 0; i < storageServers.size(); i++)
|
||||
try
|
||||
{
|
||||
resetReply(req);
|
||||
keyValueFutures.push_back(storageServers[i].getKeyValues.getReplyUnlessFailedFor(req, 2, 0));
|
||||
}
|
||||
Version version = wait(self->getVersion(cx, self));
|
||||
|
||||
Void _ = wait(waitForAll(keyValueFutures));
|
||||
GetKeyValuesRequest req;
|
||||
Key prefixBegin = beginKey.withPrefix(keyServersPrefix);
|
||||
req.begin = firstGreaterOrEqual(prefixBegin);
|
||||
req.end = firstGreaterOrEqual(keyServersEnd);
|
||||
req.limit = SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT;
|
||||
req.limitBytes = SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT_BYTES;
|
||||
req.version = version;
|
||||
|
||||
int firstValidStorageServer = -1;
|
||||
|
||||
//Read the shard location results
|
||||
for(int i = 0; i < keyValueFutures.size(); i++)
|
||||
{
|
||||
ErrorOr<GetKeyValuesReply> reply = keyValueFutures[i].get();
|
||||
|
||||
if(!reply.present())
|
||||
//Try getting the shard locations from the key servers
|
||||
state vector<Future<ErrorOr<GetKeyValuesReply>>> keyValueFutures;
|
||||
for(int j = 0; j < shards[i].second.size(); j++)
|
||||
{
|
||||
//If the storage server didn't reply in a quiescent database, then the check fails
|
||||
if(self->performQuiescentChecks)
|
||||
resetReply(req);
|
||||
keyValueFutures.push_back(shards[i].second[j].getKeyValues.getReplyUnlessFailedFor(req, 2, 0));
|
||||
}
|
||||
|
||||
Void _ = wait(waitForAll(keyValueFutures));
|
||||
|
||||
int firstValidStorageServer = -1;
|
||||
|
||||
//Read the shard location results
|
||||
for(int j = 0; j < keyValueFutures.size(); j++)
|
||||
{
|
||||
ErrorOr<GetKeyValuesReply> reply = keyValueFutures[j].get();
|
||||
|
||||
if(!reply.present())
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_KeyServerUnavailable").detail("StorageServer", storageServers[i].id().toString().c_str());
|
||||
self->testFailure("Key server unavailable");
|
||||
return false;
|
||||
//If the storage server didn't reply in a quiescent database, then the check fails
|
||||
if(self->performQuiescentChecks)
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_KeyServerUnavailable").detail("StorageServer", shards[i].second[j].id().toString().c_str());
|
||||
self->testFailure("Key server unavailable");
|
||||
return false;
|
||||
}
|
||||
|
||||
//If no storage servers replied, then throw all_alternatives_failed to force a retry
|
||||
else if(firstValidStorageServer < 0 && j == keyValueFutures.size() - 1)
|
||||
throw all_alternatives_failed();
|
||||
}
|
||||
|
||||
//If no storage servers replied, then throw all_alternatives_failed to force a retry
|
||||
else if(firstValidStorageServer < 0 && i == keyValueFutures.size() - 1)
|
||||
throw all_alternatives_failed();
|
||||
//If this is the first storage server, store the locations to send back to the caller
|
||||
else if(firstValidStorageServer < 0)
|
||||
firstValidStorageServer = j;
|
||||
|
||||
//Otherwise, compare the data to the results from the first storage server. If they are different, then the check fails
|
||||
else if(reply.get().data != keyValueFutures[firstValidStorageServer].get().get().data || reply.get().more != keyValueFutures[firstValidStorageServer].get().get().more)
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_InconsistentKeyServers").detail("StorageServer1", shards[i].second[firstValidStorageServer].id())
|
||||
.detail("StorageServer2", shards[i].second[j].id());
|
||||
self->testFailure("Key servers inconsistent");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
//If this is the first storage server, store the locations to send back to the caller
|
||||
else if(firstValidStorageServer < 0)
|
||||
firstValidStorageServer = i;
|
||||
auto keyValueResponse = keyValueFutures[firstValidStorageServer].get().get();
|
||||
Standalone<RangeResultRef> currentLocations = krmDecodeRanges( keyServersPrefix, KeyRangeRef(beginKey, endKey), RangeResultRef( keyValueResponse.data, keyValueResponse.more) );
|
||||
|
||||
//Otherwise, compare the data to the results from the first storage server. If they are different, then the check fails
|
||||
else if(reply.get().data != keyValueFutures[firstValidStorageServer].get().get().data || reply.get().more != keyValueFutures[firstValidStorageServer].get().get().more)
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_InconsistentKeyServers").detail("StorageServer1", storageServers[firstValidStorageServer].id())
|
||||
.detail("StorageServer2", storageServers[i].id());
|
||||
self->testFailure("Key servers inconsistent");
|
||||
return false;
|
||||
}
|
||||
//Push all but the last item, which will be pushed as the first item next iteration
|
||||
keyLocations.append_deep(keyLocations.arena(), currentLocations.begin(), currentLocations.size() - 1);
|
||||
|
||||
//Next iteration should pick up where we left off
|
||||
ASSERT(currentLocations.size() > 1);
|
||||
beginKey = currentLocations.end()[-1].key;
|
||||
|
||||
//If this is the last iteration, then push the allKeys.end KV pair
|
||||
if(beginKey == allKeys.end)
|
||||
keyLocations.push_back_deep(keyLocations.arena(), currentLocations.end()[-1]);
|
||||
}
|
||||
catch(Error &e)
|
||||
{
|
||||
//If we failed because of a version problem, then retry
|
||||
if(e.code() == error_code_past_version || e.code() == error_code_future_version || e.code() == error_code_past_version)
|
||||
TraceEvent("ConsistencyCheck_RetryGetKeyLocations").error(e);
|
||||
else
|
||||
throw;
|
||||
}
|
||||
|
||||
auto keyValueResponse = keyValueFutures[firstValidStorageServer].get().get();
|
||||
Standalone<RangeResultRef> currentLocations = krmDecodeRanges( keyServersPrefix, KeyRangeRef(beginKey, allKeys.end), RangeResultRef( keyValueResponse.data, keyValueResponse.more) );
|
||||
|
||||
//Push all but the last item, which will be pushed as the first item next iteration
|
||||
keyLocations.append_deep(keyLocations.arena(), currentLocations.begin(), currentLocations.size() - 1);
|
||||
|
||||
//Next iteration should pick up where we left off
|
||||
ASSERT(currentLocations.size() > 1);
|
||||
beginKey = currentLocations.end()[-1].key;
|
||||
|
||||
//If this is the last iteration, then push the allKeys.end KV pair
|
||||
if(beginKey == allKeys.end)
|
||||
keyLocations.push_back_deep(keyLocations.arena(), currentLocations.end()[-1]);
|
||||
}
|
||||
catch(Error &e)
|
||||
{
|
||||
//If we failed because of a version problem, then retry
|
||||
if(e.code() == error_code_past_version || e.code() == error_code_future_version || e.code() == error_code_past_version)
|
||||
TraceEvent("ConsistencyCheck_RetryGetKeyLocations").error(e);
|
||||
else
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
keyLocationPromise.send(keyLocations);
|
||||
return true;
|
||||
}
|
||||
|
@ -532,6 +546,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
|
||||
ACTOR Future<int64_t> getDatabaseSize(Database cx) {
|
||||
state Transaction tr( cx );
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
loop {
|
||||
try {
|
||||
StorageMetrics metrics = wait( tr.getStorageMetrics( KeyRangeRef(allKeys.begin, keyServersPrefix), 100000 ) );
|
||||
|
@ -1084,6 +1099,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
if(!statefulProcesses[itr->first.address()].count(id)) {
|
||||
TraceEvent("ConsistencyCheck_ExtraDataStore").detail("Address", itr->first.address()).detail("DataStoreID", id);
|
||||
if(g_network->isSimulated()) {
|
||||
TraceEvent("ConsistencyCheck_RebootProcess").detail("Address", itr->first.address()).detail("DataStoreID", id);
|
||||
g_simulator.rebootProcess(g_simulator.getProcessByAddress(itr->first.address()), ISimulator::RebootProcess);
|
||||
}
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue