Merge branch 'master' into qhoang-support-multithreaded-client-mako
Commit 9cb28a2bde

@@ -9,7 +9,7 @@ bindings/java/foundationdb-tests*.jar
 bindings/java/fdb-java-*-sources.jar
 packaging/msi/FDBInstaller.msi
 build/
-cmake-build-debug/
+cmake-build-debug*
 # Generated source, build, and packaging files
 *.g.cpp
 *.g.h

@@ -63,6 +63,7 @@ packaging/msi/obj
 simfdb
 tests/oldBinaries
 trace.*.xml
+.venv
 
 # Editor files
 *.iml

@@ -562,3 +562,28 @@ folly_memcpy:
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+
+Arm Limited (optimized-routines)
+
+MIT License
+
+Copyright (c) 1999-2019, Arm Limited.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

@@ -1,6 +1,6 @@
 <img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">
 
-![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiSFd4OEl4QllpbUQrQ0xyN25ZM1FIM3MyZ0tBV3Y1cm9wU293V2Rlb3Qyei9XMlIrb2Y0YkFqOTBzc2w5ZjZScFdjME9pcGRXTGNRMWkwY2ZPbGMwUUdNPSIsIml2UGFyYW1ldGVyU3BlYyI6IlBqTkJjeCt5QkNuTlBGZEwiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)
+![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiZ1FhRlNwU0JXeHVpZkt0a0k0QlNJK3BEUkplTGVRYnk3azBoT1FOazBQbGlIeDgrYmRJZVhuSUI4RTd3RWJWcjVMT3ZPTzV0NXlCTWpPTGlPVlMzckJJPSIsIml2UGFyYW1ldGVyU3BlYyI6IlB0TWVCM0VYdU5PQWtMUFYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)
 
 FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using API language binding.

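The README paragraph above notes that users interact with the database through its API language bindings. For orientation, the C-binding changes in this commit all build on the same client bootstrap sequence; a minimal sketch (error handling reduced to early returns; passing NULL as the cluster file means "use the default cluster file"):

    #define FDB_API_VERSION 710
    #include <foundationdb/fdb_c.h>
    #include <pthread.h>

    /* The network loop must run on its own thread for the client's lifetime. */
    static void* network_thread(void* arg) {
        (void)arg;
        fdb_run_network(); /* blocks until fdb_stop_network() is called */
        return NULL;
    }

    int main(void) {
        pthread_t net;
        FDBDatabase* db;
        if (fdb_select_api_version(710)) return 1;
        if (fdb_setup_network()) return 1;
        pthread_create(&net, NULL, network_thread, NULL);
        if (fdb_create_database(NULL, &db)) return 1; /* NULL = default cluster file */
        /* ... create transactions, block on futures ... */
        fdb_database_destroy(db);
        fdb_stop_network();
        pthread_join(net, NULL);
        return 0;
    }
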
@@ -1,6 +1,6 @@
+add_subdirectory(c)
 if(NOT OPEN_FOR_IDE)
   # flow bindings currently doesn't support that
-  add_subdirectory(c)
   add_subdirectory(flow)
 endif()
 if(WITH_PYTHON_BINDING)

@@ -39,6 +39,8 @@ else()
   strip_debug_symbols(fdb_c)
 endif()
 add_dependencies(fdb_c fdb_c_generated fdb_c_options)
+add_dependencies(fdbclient fdb_c_options)
+add_dependencies(fdbclient_sampling fdb_c_options)
 target_link_libraries(fdb_c PUBLIC $<BUILD_INTERFACE:fdbclient>)
 if(APPLE)
   set(symbols ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.symbols)

@@ -80,6 +82,10 @@ if(NOT WIN32)
 
   set(UNIT_TEST_VERSION_510_SRCS test/unit/unit_tests_version_510.cpp)
   set(TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS test/unit/trace_partial_file_suffix_test.cpp)
+  set(DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS
+      test/unit/disconnected_timeout_tests.cpp
+      test/unit/fdb_api.cpp
+      test/unit/fdb_api.hpp)
 
   if(OPEN_FOR_IDE)
     add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)

@@ -90,6 +96,7 @@ if(NOT WIN32)
     add_library(fdb_c_unit_tests OBJECT ${UNIT_TEST_SRCS})
     add_library(fdb_c_unit_tests_version_510 OBJECT ${UNIT_TEST_VERSION_510_SRCS})
     add_library(trace_partial_file_suffix_test OBJECT ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
+    add_library(disconnected_timeout_unit_tests OBJECT ${DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS})
   else()
     add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
     add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)

@@ -99,6 +106,7 @@ if(NOT WIN32)
     add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
     add_executable(fdb_c_unit_tests_version_510 ${UNIT_TEST_VERSION_510_SRCS})
     add_executable(trace_partial_file_suffix_test ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
+    add_executable(disconnected_timeout_unit_tests ${DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS})
     strip_debug_symbols(fdb_c_performance_test)
     strip_debug_symbols(fdb_c_ryw_benchmark)
     strip_debug_symbols(fdb_c_txn_size_test)

@@ -110,13 +118,16 @@ if(NOT WIN32)
   add_dependencies(fdb_c_setup_tests doctest)
   add_dependencies(fdb_c_unit_tests doctest)
   add_dependencies(fdb_c_unit_tests_version_510 doctest)
+  add_dependencies(disconnected_timeout_unit_tests doctest)
   target_include_directories(fdb_c_setup_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
   target_include_directories(fdb_c_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
   target_include_directories(fdb_c_unit_tests_version_510 PUBLIC ${DOCTEST_INCLUDE_DIR})
+  target_include_directories(disconnected_timeout_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
   target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
   target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads)
   target_link_libraries(fdb_c_unit_tests_version_510 PRIVATE fdb_c Threads::Threads)
   target_link_libraries(trace_partial_file_suffix_test PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(disconnected_timeout_unit_tests PRIVATE fdb_c Threads::Threads)
 
   # do not set RPATH for mako
   set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)

@@ -130,13 +141,19 @@ if(NOT WIN32)
     target_link_libraries(fdb_c90_test PRIVATE fdb_c)
   endif()
 
+  if(OPEN_FOR_IDE)
+    set(FDB_C_TARGET $<TARGET_OBJECTS:fdb_c>)
+  else()
+    set(FDB_C_TARGET $<TARGET_FILE:fdb_c>)
+  endif()
   add_custom_command(
     OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
-    COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_c> ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
+    COMMAND ${CMAKE_COMMAND} -E copy ${FDB_C_TARGET} ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
     DEPENDS fdb_c
     COMMENT "Copy libfdb_c to use as external client for test")
   add_custom_target(external_client DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so)
   add_dependencies(fdb_c_unit_tests external_client)
+  add_dependencies(disconnected_timeout_unit_tests external_client)
 
   add_fdbclient_test(
     NAME fdb_c_setup_tests

@@ -163,6 +180,17 @@ if(NOT WIN32)
             fdb
             ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
   )
+  add_unavailable_fdbclient_test(
+    NAME disconnected_timeout_unit_tests
+    COMMAND $<TARGET_FILE:disconnected_timeout_unit_tests>
+            @CLUSTER_FILE@
+  )
+  add_unavailable_fdbclient_test(
+    NAME disconnected_timeout_external_client_unit_tests
+    COMMAND $<TARGET_FILE:disconnected_timeout_unit_tests>
+            @CLUSTER_FILE@
+            ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
+  )
 endif()
 
 set(c_workloads_srcs

@@ -152,19 +152,18 @@ void* fdb_network_thread(void* args) {
 }
 
 int genprefix(char* str, char* prefix, int prefixlen, int prefixpadding, int rows, int len) {
     const int rowdigit = digits(rows);
     const int paddinglen = len - (prefixlen + rowdigit) - 1;
     int offset = 0;
     if (prefixpadding) {
         memset(str, 'x', paddinglen);
         offset += paddinglen;
     }
     memcpy(str + offset, prefix, prefixlen);
     str[len - 1] = '\0';
     return offset + prefixlen;
 }
 
-
 /* cleanup database */
 int cleanup(FDBTransaction* transaction, mako_args_t* args) {
     struct timespec timer_start, timer_end;

@@ -194,13 +193,13 @@ retryTxn:
 
     fdb_transaction_clear_range(transaction, (uint8_t*)beginstr, len + 1, (uint8_t*)endstr, len + 1);
     switch (commit_transaction(transaction)) {
     case (FDB_SUCCESS):
         break;
     case (FDB_ERROR_RETRY):
         fdb_transaction_reset(transaction);
         goto retryTxn;
     default:
         goto failExit;
     }
 
     fdb_transaction_reset(transaction);

@@ -323,12 +322,12 @@ int populate(FDBTransaction* transaction,
         }
 
         switch (commit_transaction(transaction)) {
         case (FDB_SUCCESS):
             break;
         case (FDB_ERROR_RETRY):
             goto retryTxn;
         default:
             goto failExit;
         }
 
         /* xact latency stats */

@@ -380,9 +379,14 @@ int populate(FDBTransaction* transaction,
     if (stats->xacts % args->sampling == 0) {
         clock_gettime(CLOCK_MONOTONIC, &timer_per_xact_end);
         update_op_lat_stats(
             &timer_start_commit, &timer_per_xact_end, OP_COMMIT, stats, block, elem_size, is_memory_allocated);
-        update_op_lat_stats(
-            &timer_per_xact_start, &timer_per_xact_end, OP_TRANSACTION, stats, block, elem_size, is_memory_allocated);
+        update_op_lat_stats(&timer_per_xact_start,
+                            &timer_per_xact_end,
+                            OP_TRANSACTION,
+                            stats,
+                            block,
+                            elem_size,
+                            is_memory_allocated);
     }
 }
 

@@ -588,7 +592,13 @@ retryTxn:
         if (keyend > args->rows - 1) {
             keyend = args->rows - 1;
         }
-        genkey(keystr2, KEYPREFIX, KEYPREFIXLEN, args->prefixpadding, keyend, args->rows, args->key_length + 1);
+        genkey(keystr2,
+               KEYPREFIX,
+               KEYPREFIXLEN,
+               args->prefixpadding,
+               keyend,
+               args->rows,
+               args->key_length + 1);
     }
 
     if (stats->xacts % args->sampling == 0) {

@@ -1243,7 +1253,8 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
 
     /* Set client Log group */
     if (strlen(args->log_group) != 0) {
-        err = fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group));
+        err =
+            fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group));
         if (err) {
             fprintf(stderr, "ERROR: fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP): %s\n", fdb_get_error(err));
         }

@@ -1332,7 +1343,9 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
         fdb_create_database(args->cluster_files[cluster_index], &process.databases[i]);
     }
 #endif
+    if (args->disable_ryw) {
+        fdb_database_set_option(process.database, FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE, (uint8_t*)NULL, 0);
+    }
     fprintf(debugme, "DEBUG: creating %d worker threads\n", args->num_threads);
     worker_threads = (pthread_t*)calloc(sizeof(pthread_t), args->num_threads);
     if (!worker_threads) {

@@ -1453,7 +1466,7 @@ int init_args(mako_args_t* args) {
     for (i = 0; i < MAX_OP; i++) {
         args->txnspec.ops[i][OP_COUNT] = 0;
     }
-    args->client_threads_per_version = 0;
+    args->disable_ryw = 0;
     return 0;
 }
 

@@ -1619,7 +1632,7 @@ void usage() {
     printf("%-24s %s\n", " --knobs=KNOBS", "Set client knobs");
     printf("%-24s %s\n", " --flatbuffers", "Use flatbuffers");
     printf("%-24s %s\n", " --streaming", "Streaming mode: all (default), iterator, small, medium, large, serial");
-    printf("%-24s %s\n", " --client_threads_per_version", "Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client.");
+    printf("%-24s %s\n", " --disable_ryw", "Disable snapshot read-your-writes");
 }
 
 /* parse benchmark paramters */

@@ -1667,6 +1680,7 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
     { "txntagging_prefix", required_argument, NULL, ARG_TXNTAGGINGPREFIX },
     { "version", no_argument, NULL, ARG_VERSION },
     { "client_threads_per_version", required_argument, NULL, ARG_CLIENT_THREADS_PER_VERSION },
+    { "disable_ryw", no_argument, NULL, ARG_DISABLE_RYW },
     { NULL, 0, NULL, 0 }
 };
 idx = 0;

@@ -1818,18 +1832,19 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
                 args->txntagging = 1000;
             }
             break;
-        case ARG_TXNTAGGINGPREFIX: {
+        case ARG_TXNTAGGINGPREFIX:
             if (strlen(optarg) > TAGPREFIXLENGTH_MAX) {
                 fprintf(stderr, "Error: the length of txntagging_prefix is larger than %d\n", TAGPREFIXLENGTH_MAX);
                 exit(0);
             }
             memcpy(args->txntagging_prefix, optarg, strlen(optarg));
             break;
-        }
-        case ARG_CLIENT_THREADS_PER_VERSION: {
+        case ARG_CLIENT_THREADS_PER_VERSION:
             args->client_threads_per_version = atoi(optarg);
             break;
-        }
+        case ARG_DISABLE_RYW:
+            args->disable_ryw = 1;
+            break;
         }
     }
 

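The two options parsed above drive this branch's multithreaded-client support. `--disable_ryw` maps directly onto the FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE database option set in the worker_process_main hunk earlier; `--client_threads_per_version` is expected to feed the corresponding client-threads network option. A hedged sketch of that wiring (the exact placement inside mako is outside the hunks shown here):

    /* Sketch: apply the parsed options. FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION
     * takes a little-endian int64 and, per its documentation, must be set before
     * fdb_setup_network(); values > 1 imply disable_local_client, as the usage
     * text above notes. */
    if (args->client_threads_per_version > 0) {
        fdb_network_set_option(FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION,
                               (uint8_t*)&args->client_threads_per_version,
                               sizeof(args->client_threads_per_version));
    }
    /* And per database handle, as shown in the worker_process_main hunk: */
    if (args->disable_ryw) {
        fdb_database_set_option(process.database, FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE, (uint8_t*)NULL, 0);
    }
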
@@ -81,7 +81,8 @@ enum Arguments {
     ARG_TXNTAGGING,
     ARG_TXNTAGGINGPREFIX,
     ARG_STREAMING_MODE,
-    ARG_CLIENT_THREADS_PER_VERSION
+    ARG_CLIENT_THREADS_PER_VERSION,
+    ARG_DISABLE_RYW
 };
 
 enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE };

@@ -140,6 +141,7 @@ typedef struct {
     char txntagging_prefix[TAGPREFIXLENGTH_MAX];
     FDBStreamingMode streaming_mode;
     int64_t client_threads_per_version;
+    int disable_ryw;
 } mako_args_t;
 
 /* shared memory */

@@ -71,13 +71,13 @@ int digits(int num) {
 /* prefix is "mako" by default, prefixpadding = 1 means 'x' will be in front rather than trailing the keyname */
 /* len is the buffer size, key length + null */
 void genkey(char* str, char* prefix, int prefixlen, int prefixpadding, int num, int rows, int len) {
     const int rowdigit = digits(rows);
     const int prefixoffset = prefixpadding ? len - (prefixlen + rowdigit) - 1 : 0;
     char* prefixstr = (char*)alloca(sizeof(char) * (prefixlen + rowdigit + 1));
     snprintf(prefixstr, prefixlen + rowdigit + 1, "%s%0.*d", prefix, rowdigit, num);
     memset(str, 'x', len);
     memcpy(str + prefixoffset, prefixstr, prefixlen + rowdigit);
     str[len - 1] = '\0';
 }
 
 /* This is another sorting algorithm used to calculate latency parameters */

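Both mako hunks above lean on genkey, so a standalone sketch of the key layout it produces may help; digits() is reimplemented here because its definition is outside the hunk, and the buffer size is purely illustrative:

    #include <stdio.h>
    #include <string.h>

    /* digits() lives elsewhere in mako; reimplemented for this sketch. */
    static int digits(int num) {
        int n = 0;
        do { n++; num /= 10; } while (num > 0);
        return n;
    }

    /* Same layout as genkey above: prefix + zero-padded row id, with 'x' padding
     * either leading (prefixpadding == 1) or trailing. len counts the NUL. */
    static void genkey_sketch(char* str, const char* prefix, int prefixpadding, int num, int rows, int len) {
        const int prefixlen = (int)strlen(prefix);
        const int rowdigit = digits(rows);
        const int prefixoffset = prefixpadding ? len - (prefixlen + rowdigit) - 1 : 0;
        char prefixstr[64];
        snprintf(prefixstr, sizeof(prefixstr), "%s%0*d", prefix, rowdigit, num);
        memset(str, 'x', len);
        memcpy(str + prefixoffset, prefixstr, prefixlen + rowdigit);
        str[len - 1] = '\0';
    }

    int main(void) {
        char key[17];
        genkey_sketch(key, "mako", 0, 42, 10000, sizeof(key));
        printf("%s\n", key); /* -> "mako00042xxxxxxx" */
        genkey_sketch(key, "mako", 1, 42, 10000, sizeof(key));
        printf("%s\n", key); /* -> "xxxxxxxmako00042" */
        return 0;
    }
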
@@ -0,0 +1,292 @@
+/*
+ * disconnected_timeout_tests.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests that test the timeouts for a disconnected cluster
+
+#define FDB_API_VERSION 710
+#include <foundationdb/fdb_c.h>
+
+#include <chrono>
+#include <iostream>
+#include <string.h>
+#include <thread>
+
+#define DOCTEST_CONFIG_IMPLEMENT
+#include "doctest.h"
+#include "fdb_api.hpp"
+
+void fdb_check(fdb_error_t e) {
+    if (e) {
+        std::cerr << fdb_get_error(e) << std::endl;
+        std::abort();
+    }
+}
+
+FDBDatabase* fdb_open_database(const char* clusterFile) {
+    FDBDatabase* db;
+    fdb_check(fdb_create_database(clusterFile, &db));
+    return db;
+}
+
+static FDBDatabase* db = nullptr;
+static FDBDatabase* timeoutDb = nullptr;
+
+// Blocks until the given future is ready, returning an error code if there was
+// an issue.
+fdb_error_t wait_future(fdb::Future& f) {
+    fdb_check(f.block_until_ready());
+    return f.get_error();
+}
+
+void validateTimeoutDuration(double expectedSeconds, std::chrono::time_point<std::chrono::steady_clock> start) {
+    std::chrono::duration<double> duration = std::chrono::steady_clock::now() - start;
+    double actualSeconds = duration.count();
+    CHECK(actualSeconds >= expectedSeconds - 1e-6);
+    CHECK(actualSeconds < expectedSeconds * 2);
+}
+
+TEST_CASE("500ms_transaction_timeout") {
+    auto start = std::chrono::steady_clock::now();
+
+    fdb::Transaction tr(db);
+
+    int64_t timeout = 500;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(timeout / 1000.0, start);
+}
+
+TEST_CASE("500ms_transaction_timeout_after_op") {
+    auto start = std::chrono::steady_clock::now();
+
+    fdb::Transaction tr(db);
+    fdb::Int64Future grvFuture = tr.get_read_version();
+
+    int64_t timeout = 500;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(timeout / 1000.0, start);
+}
+
+TEST_CASE("500ms_transaction_timeout_before_op_2000ms_after") {
+    auto start = std::chrono::steady_clock::now();
+
+    fdb::Transaction tr(db);
+
+    int64_t timeout = 500;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+
+    timeout = 2000;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(timeout / 1000.0, start);
+}
+
+TEST_CASE("2000ms_transaction_timeout_before_op_500ms_after") {
+    auto start = std::chrono::steady_clock::now();
+
+    fdb::Transaction tr(db);
+
+    int64_t timeout = 2000;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+
+    timeout = 500;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(timeout / 1000.0, start);
+}
+
+TEST_CASE("500ms_database_timeout") {
+    auto start = std::chrono::steady_clock::now();
+
+    int64_t timeout = 500;
+    fdb_check(fdb_database_set_option(
+        timeoutDb, FDB_DB_OPTION_TRANSACTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Transaction tr(timeoutDb);
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(timeout / 1000.0, start);
+}
+
+TEST_CASE("2000ms_database_timeout_500ms_transaction_timeout") {
+    auto start = std::chrono::steady_clock::now();
+
+    int64_t timeout = 2000;
+    fdb_check(fdb_database_set_option(
+        timeoutDb, FDB_DB_OPTION_TRANSACTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Transaction tr(timeoutDb);
+
+    timeout = 500;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(timeout / 1000.0, start);
+}
+
+TEST_CASE("500ms_database_timeout_2000ms_transaction_timeout_with_reset") {
+    auto start = std::chrono::steady_clock::now();
+
+    int64_t dbTimeout = 500;
+    fdb_check(fdb_database_set_option(
+        timeoutDb, FDB_DB_OPTION_TRANSACTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&dbTimeout), sizeof(dbTimeout)));
+
+    fdb::Transaction tr(timeoutDb);
+
+    int64_t trTimeout = 2000;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&trTimeout), sizeof(trTimeout)));
+
+    tr.reset();
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+    fdb_error_t err = wait_future(grvFuture);
+
+    CHECK(err == 1031);
+    validateTimeoutDuration(dbTimeout / 1000.0, start);
+}
+
+TEST_CASE("transaction_reset_cancels_without_timeout") {
+    fdb::Transaction tr(db);
+    fdb::Int64Future grvFuture = tr.get_read_version();
+    tr.reset();
+
+    fdb_error_t err = wait_future(grvFuture);
+    CHECK(err == 1025);
+}
+
+TEST_CASE("transaction_reset_cancels_with_timeout") {
+    fdb::Transaction tr(db);
+
+    int64_t timeout = 500;
+    fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    fdb::Int64Future grvFuture = tr.get_read_version();
+    tr.reset();
+
+    fdb_error_t err = wait_future(grvFuture);
+    CHECK(err == 1025);
+}
+
+TEST_CASE("transaction_destruction_cancels_without_timeout") {
+    FDBTransaction* tr;
+    fdb_check(fdb_database_create_transaction(db, &tr));
+
+    FDBFuture* grvFuture = fdb_transaction_get_read_version(tr);
+    fdb_transaction_destroy(tr);
+
+    fdb_check(fdb_future_block_until_ready(grvFuture));
+    fdb_error_t err = fdb_future_get_error(grvFuture);
+    CHECK(err == 1025);
+
+    fdb_future_destroy(grvFuture);
+}
+
+TEST_CASE("transaction_destruction_cancels_with_timeout") {
+    FDBTransaction* tr;
+    fdb_check(fdb_database_create_transaction(db, &tr));
+
+    int64_t timeout = 500;
+    fdb_check(fdb_transaction_set_option(
+        tr, FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+
+    FDBFuture* grvFuture = fdb_transaction_get_read_version(tr);
+    fdb_transaction_destroy(tr);
+
+    fdb_check(fdb_future_block_until_ready(grvFuture));
+    fdb_error_t err = fdb_future_get_error(grvFuture);
+    CHECK(err == 1025);
+
+    fdb_future_destroy(grvFuture);
+}
+
+TEST_CASE("transaction_set_timeout_and_destroy_repeatedly") {
+    for (int i = 0; i < 1000; ++i) {
+        fdb::Transaction tr(db);
+        int64_t timeout = 500;
+        fdb_check(tr.set_option(FDB_TR_OPTION_TIMEOUT, reinterpret_cast<const uint8_t*>(&timeout), sizeof(timeout)));
+    }
+}
+
+int main(int argc, char** argv) {
+    if (argc < 2) {
+        std::cout << "Disconnected timeout unit tests for the FoundationDB C API.\n"
+                  << "Usage: disconnected_timeout_tests <unavailableClusterFile> [externalClient] [doctest args]"
+                  << std::endl;
+        return 1;
+    }
+    fdb_check(fdb_select_api_version(710));
+    if (argc >= 3) {
+        std::string externalClientLibrary = argv[2];
+        if (externalClientLibrary.substr(0, 2) != "--") {
+            fdb_check(fdb_network_set_option(
+                FDBNetworkOption::FDB_NET_OPTION_DISABLE_LOCAL_CLIENT, reinterpret_cast<const uint8_t*>(""), 0));
+            fdb_check(fdb_network_set_option(FDBNetworkOption::FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY,
+                                             reinterpret_cast<const uint8_t*>(externalClientLibrary.c_str()),
+                                             externalClientLibrary.size()));
+        }
+    }
+
+    doctest::Context context;
+    context.applyCommandLine(argc, argv);
+
+    fdb_check(fdb_setup_network());
+    std::thread network_thread{ &fdb_run_network };
+
+    db = fdb_open_database(argv[1]);
+    timeoutDb = fdb_open_database(argv[1]);
+
+    int res = context.run();
+    fdb_database_destroy(db);
+    fdb_database_destroy(timeoutDb);
+
+    if (context.shouldExit()) {
+        fdb_check(fdb_stop_network());
+        network_thread.join();
+        return res;
+    }
+    fdb_check(fdb_stop_network());
+    network_thread.join();
+
+    return res;
+}

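The new test file exercises two error paths through the plain C API: a pending future fails with error 1031 (transaction_timed_out) once FDB_TR_OPTION_TIMEOUT expires, and with 1025 (transaction_cancelled) when the owning transaction is reset or destroyed. Distilled to a sketch that reuses the file's own db handle and fdb_check helper:

    /* Sketch: observe a 500 ms transaction timeout through a blocked future. */
    FDBTransaction* tr;
    int64_t timeout_ms = 500; /* little-endian int64, in milliseconds */
    fdb_check(fdb_database_create_transaction(db, &tr));
    fdb_check(fdb_transaction_set_option(
        tr, FDB_TR_OPTION_TIMEOUT, (const uint8_t*)&timeout_ms, sizeof(timeout_ms)));
    FDBFuture* f = fdb_transaction_get_read_version(tr);
    fdb_check(fdb_future_block_until_ready(f));
    fdb_error_t err = fdb_future_get_error(f); /* 1031 against an unreachable cluster */
    fdb_future_destroy(f);
    fdb_transaction_destroy(tr);
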
@@ -43,7 +43,8 @@ set(go_options_file ${GO_DEST}/src/fdb/generated.go)
 
 set(go_env GOPATH=${GOPATH}
   C_INCLUDE_PATH=${CMAKE_BINARY_DIR}/bindings/c/foundationdb:${CMAKE_SOURCE_DIR}/bindings/c
-  CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib)
+  CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib
+  GO111MODULE=auto)
 
 foreach(src_file IN LISTS SRCS)
   set(dest_file ${GO_DEST}/${src_file})

@@ -77,19 +77,37 @@ add_dependencies(packages python_package)
 
 if (NOT WIN32 AND NOT OPEN_FOR_IDE)
   add_fdbclient_test(
-    NAME fdbcli_tests
+    NAME single_process_fdbcli_tests
     COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
-            ${CMAKE_BINARY_DIR}/bin/fdbcli
+            ${CMAKE_BINARY_DIR}
             @CLUSTER_FILE@
-            1
   )
   add_fdbclient_test(
     NAME multi_process_fdbcli_tests
     PROCESS_NUMBER 5
     TEST_TIMEOUT 120 # The test can take near to 1 minutes sometime, set timeout to 2 minutes to be safe
     COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
-            ${CMAKE_BINARY_DIR}/bin/fdbcli
+            ${CMAKE_BINARY_DIR}
             @CLUSTER_FILE@
             5
   )
+  if (TARGET external_client) # external_client copies fdb_c to bindings/c/libfdb_c.so
+    add_fdbclient_test(
+      NAME single_process_external_client_fdbcli_tests
+      COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
+              ${CMAKE_BINARY_DIR}
+              @CLUSTER_FILE@
+              --external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c.so
+    )
+    add_fdbclient_test(
+      NAME multi_process_external_client_fdbcli_tests
+      PROCESS_NUMBER 5
+      TEST_TIMEOUT 120 # The test can take near to 1 minutes sometime, set timeout to 2 minutes to be safe
+      COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
+              ${CMAKE_BINARY_DIR}
+              @CLUSTER_FILE@
+              5
+              --external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c.so
+    )
+  endif()
 endif()

@@ -1,14 +1,17 @@
 #!/usr/bin/env python3
 
 import sys
+import os
 import subprocess
 import logging
 import functools
 import json
 import time
 import random
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
 
-def enable_logging(level=logging.ERROR):
+
+def enable_logging(level=logging.DEBUG):
     """Enable logging in the function with the specified logging level
 
     Args:

@@ -16,7 +19,7 @@ def enable_logging(level=logging.ERROR):
     """
     def func_decorator(func):
         @functools.wraps(func)
-        def wrapper(*args,**kwargs):
+        def wrapper(*args, **kwargs):
             # initialize logger
             logger = logging.getLogger(func.__name__)
             logger.setLevel(level)

@@ -32,6 +35,7 @@ def enable_logging(level=logging.ERROR):
         return wrapper
     return func_decorator
 
+
 def run_fdbcli_command(*args):
     """run the fdbcli statement: fdbcli --exec '<arg1> <arg2> ... <argN>'.
 

@@ -39,7 +43,8 @@ def run_fdbcli_command(*args):
         string: Console output from fdbcli
     """
     commands = command_template + ["{}".format(' '.join(args))]
-    return subprocess.run(commands, stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
+    return subprocess.run(commands, stdout=subprocess.PIPE, env=fdbcli_env).stdout.decode('utf-8').strip()
 
+
 def run_fdbcli_command_and_get_error(*args):
     """run the fdbcli statement: fdbcli --exec '<arg1> <arg2> ... <argN>'.

@@ -48,7 +53,8 @@ def run_fdbcli_command_and_get_error(*args):
         string: Stderr output from fdbcli
     """
     commands = command_template + ["{}".format(' '.join(args))]
-    return subprocess.run(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stderr.decode('utf-8').strip()
+    return subprocess.run(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env).stderr.decode('utf-8').strip()
 
+
 @enable_logging()
 def advanceversion(logger):

@@ -72,6 +78,7 @@ def advanceversion(logger):
     logger.debug("Read version: {}".format(version4))
     assert version4 >= version3
 
+
 @enable_logging()
 def maintenance(logger):
     # expected fdbcli output when running 'maintenance' while there's no ongoing maintenance

@@ -94,45 +101,52 @@ def maintenance(logger):
     output3 = run_fdbcli_command('maintenance')
     assert output3 == no_maintenance_output
 
 
 @enable_logging()
 def setclass(logger):
+    # get all processes' network addresses
     output1 = run_fdbcli_command('setclass')
-    class_type_line_1 = output1.split('\n')[-1]
-    logger.debug(class_type_line_1)
-    # check process' network address
-    assert '127.0.0.1' in class_type_line_1
-    network_address = ':'.join(class_type_line_1.split(':')[:2])
-    logger.debug("Network address: {}".format(network_address))
-    # check class type
-    assert 'unset' in class_type_line_1
-    # check class source
-    assert 'command_line' in class_type_line_1
+    logger.debug(output1)
+    # except the first line, each line is one process
+    process_types = output1.split('\n')[1:]
+    assert len(process_types) == args.process_number
+    addresses = []
+    for line in process_types:
+        assert '127.0.0.1' in line
+        # check class type
+        assert 'unset' in line
+        # check class source
+        assert 'command_line' in line
+        # check process' network address
+        network_address = ':'.join(line.split(':')[:2])
+        logger.debug("Network address: {}".format(network_address))
+        addresses.append(network_address)
+    random_address = random.choice(addresses)
+    logger.debug("Randomly selected address: {}".format(random_address))
     # set class to a random valid type
-    class_types = ['storage', 'storage', 'transaction', 'resolution',
+    class_types = ['storage', 'transaction', 'resolution',
                    'commit_proxy', 'grv_proxy', 'master', 'stateless', 'log',
                    'router', 'cluster_controller', 'fast_restore', 'data_distributor',
                    'coordinator', 'ratekeeper', 'storage_cache', 'backup'
                    ]
     random_class_type = random.choice(class_types)
     logger.debug("Change to type: {}".format(random_class_type))
-    run_fdbcli_command('setclass', network_address, random_class_type)
+    run_fdbcli_command('setclass', random_address, random_class_type)
     # check the set successful
     output2 = run_fdbcli_command('setclass')
-    class_type_line_2 = output2.split('\n')[-1]
-    logger.debug(class_type_line_2)
+    logger.debug(output2)
+    assert random_address in output2
+    process_types = output2.split('\n')[1:]
     # check process' network address
-    assert network_address in class_type_line_2
-    # check class type changed to the specified value
-    assert random_class_type in class_type_line_2
-    # check class source
-    assert 'set_class' in class_type_line_2
-    # set back to default
-    run_fdbcli_command('setclass', network_address, 'default')
-    # everything should be back to the same as before
-    output3 = run_fdbcli_command('setclass')
-    class_type_line_3 = output3.split('\n')[-1]
-    logger.debug(class_type_line_3)
-    assert class_type_line_3 == class_type_line_1
+    for line in process_types:
+        if random_address in line:
+            # check class type changed to the specified value
+            assert random_class_type in line
+            # check class source
+            assert 'set_class' in line
+    # set back to unset
+    run_fdbcli_command('setclass', random_address, 'unset')
 
 
 @enable_logging()
 def lockAndUnlock(logger):

@@ -148,7 +162,7 @@ def lockAndUnlock(logger):
     output2 = run_fdbcli_command_and_get_error("lock")
     assert output2 == 'ERROR: Database is locked (1038)'
     # unlock the database
-    process = subprocess.Popen(command_template + ['unlock ' + lock_uid], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+    process = subprocess.Popen(command_template + ['unlock ' + lock_uid], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
     line1 = process.stdout.readline()
     # The randome passphrease we need to confirm to proceed the unlocking
     line2 = process.stdout.readline()

@@ -159,6 +173,7 @@ def lockAndUnlock(logger):
     assert output3.decode('utf-8').strip() == 'Database unlocked.'
     assert not get_value_from_status_json(True, 'cluster', 'database_lock_state', 'locked')
 
+
 @enable_logging()
 def kill(logger):
     output1 = run_fdbcli_command('kill')

@@ -168,11 +183,11 @@ def kill(logger):
     address = lines[1]
     logger.debug("Address: {}".format(address))
     old_generation = get_value_from_status_json(False, 'cluster', 'generation')
     # This is currently an issue with fdbcli,
     # where you need to first run 'kill' to initialize processes' list
     # and then specify the certain process to kill
-    process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
     #
     output2, err = process.communicate(input='kill; kill {}\n'.format(address).encode())
     logger.debug(output2)
     # wait for a second for the cluster recovery

@@ -181,6 +196,7 @@ def kill(logger):
     logger.debug("Old: {}, New: {}".format(old_generation, new_generation))
     assert new_generation > old_generation
 
+
 @enable_logging()
 def suspend(logger):
     output1 = run_fdbcli_command('suspend')

@@ -200,7 +216,7 @@ def suspend(logger):
     assert len(pinfo) == 1
     pid = pinfo[0].split(' ')[0]
     logger.debug("Pid: {}".format(pid))
-    process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
     # suspend the process for enough long time
     output2, err = process.communicate(input='suspend; suspend 3600 {}\n'.format(address).encode())
     # the cluster should be unavailable after the only process being suspended

@@ -213,7 +229,7 @@ def suspend(logger):
     kill_output = subprocess.check_output(['kill', pid]).decode().strip()
     logger.debug("Kill result: {}".format(kill_output))
     # The process should come back after a few time
     duration = 0  # seconds we already wait
     while not get_value_from_status_json(False, 'client', 'database_status', 'available') and duration < 60:
         logger.debug("Sleep for 1 second to wait cluster recovery")
         time.sleep(1)

@@ -221,6 +237,7 @@ def suspend(logger):
     # at most after 60 seconds, the cluster should be available
     assert get_value_from_status_json(False, 'client', 'database_status', 'available')
 
+
 def get_value_from_status_json(retry, *args):
     while True:
         result = json.loads(run_fdbcli_command('status', 'json'))

@@ -229,9 +246,10 @@ def get_value_from_status_json(retry, *args):
     for arg in args:
         assert arg in result
         result = result[arg]
 
     return result
 
+
 @enable_logging()
 def consistencycheck(logger):
     consistency_check_on_output = 'ConsistencyCheck is on'

@@ -245,6 +263,7 @@ def consistencycheck(logger):
     output3 = run_fdbcli_command('consistencycheck')
     assert output3 == consistency_check_on_output
 
+
 @enable_logging()
 def cache_range(logger):
     # this command is currently experimental

@@ -252,6 +271,7 @@ def cache_range(logger):
     run_fdbcli_command('cache_range', 'set', 'a', 'b')
     run_fdbcli_command('cache_range', 'clear', 'a', 'b')
 
+
 @enable_logging()
 def datadistribution(logger):
     output1 = run_fdbcli_command('datadistribution', 'off')

@@ -271,6 +291,7 @@ def datadistribution(logger):
     assert output6 == 'Data distribution is enabled for rebalance.'
     time.sleep(1)
 
+
 @enable_logging()
 def transaction(logger):
     """This test will cover the transaction related fdbcli commands.

@@ -280,7 +301,7 @@ def transaction(logger):
     """
     err1 = run_fdbcli_command_and_get_error('set', 'key', 'value')
     assert err1 == 'ERROR: writemode must be enabled to set or clear keys in the database.'
-    process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
     transaction_flow = ['writemode on', 'begin', 'getversion', 'set key value', 'get key', 'commit']
     output1, _ = process.communicate(input='\n'.join(transaction_flow).encode())
     # split the output into lines

@@ -299,13 +320,13 @@ def transaction(logger):
     output2 = run_fdbcli_command('get', 'key')
     assert output2 == "`key' is `value'"
     # test rollback and read-your-write behavior
-    process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
     transaction_flow = [
         'writemode on', 'begin', 'getrange a z',
         'clear key', 'get key',
         # 'option on READ_YOUR_WRITES_DISABLE', 'get key',
         'rollback'
     ]
     output3, _ = process.communicate(input='\n'.join(transaction_flow).encode())
     lines = list(filter(len, output3.decode().split('\n')))[-5:]
     # lines[0] == "Transaction started" and lines[1] == 'Range limited to 25 keys'

@@ -316,13 +337,13 @@ def transaction(logger):
     output4 = run_fdbcli_command('get', 'key')
     assert output4 == "`key' is `value'"
     # test read_your_write_disable option and clear the inserted key
-    process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+    process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
     transaction_flow = [
         'writemode on', 'begin',
         'option on READ_YOUR_WRITES_DISABLE',
         'clear key', 'get key',
         'commit'
     ]
     output6, _ = process.communicate(input='\n'.join(transaction_flow).encode())
     lines = list(filter(len, output6.decode().split('\n')))[-4:]
     assert lines[1] == 'Option enabled for current transaction'

@ -332,15 +353,17 @@ def transaction(logger):
|
||||||
output7 = run_fdbcli_command('get', 'key')
|
output7 = run_fdbcli_command('get', 'key')
|
||||||
assert output7 == "`key': not found"
|
assert output7 == "`key': not found"
|
||||||
|
|
||||||
|
|
||||||
def get_fdb_process_addresses(logger):
|
def get_fdb_process_addresses(logger):
|
||||||
# get all processes' network addresses
|
# get all processes' network addresses
|
||||||
output = run_fdbcli_command('kill')
|
output = run_fdbcli_command('kill')
|
||||||
logger.debug(output)
|
logger.debug(output)
|
||||||
# except the first line, each line is one process
|
# except the first line, each line is one process
|
||||||
addresses = output.split('\n')[1:]
|
addresses = output.split('\n')[1:]
|
||||||
assert len(addresses) == process_number
|
assert len(addresses) == args.process_number
|
||||||
return addresses
|
return addresses
|
||||||
|
|
||||||
|
|
||||||
@enable_logging(logging.DEBUG)
|
@enable_logging(logging.DEBUG)
|
||||||
def coordinators(logger):
|
def coordinators(logger):
|
||||||
# we should only have one coordinator for now
|
# we should only have one coordinator for now
|
||||||
|
@ -362,12 +385,13 @@ def coordinators(logger):
|
||||||
# verify now we have 5 coordinators and the description is updated
|
# verify now we have 5 coordinators and the description is updated
|
||||||
output2 = run_fdbcli_command('coordinators')
|
output2 = run_fdbcli_command('coordinators')
|
||||||
assert output2.split('\n')[0].split(': ')[-1] == new_cluster_description
|
assert output2.split('\n')[0].split(': ')[-1] == new_cluster_description
|
||||||
assert output2.split('\n')[1] == 'Cluster coordinators ({}): {}'.format(5, ','.join(addresses))
|
assert output2.split('\n')[1] == 'Cluster coordinators ({}): {}'.format(args.process_number, ','.join(addresses))
|
||||||
# auto change should go back to 1 coordinator
|
# auto change should go back to 1 coordinator
|
||||||
run_fdbcli_command('coordinators', 'auto')
|
run_fdbcli_command('coordinators', 'auto')
|
||||||
assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1
|
assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1
|
||||||
wait_for_database_available(logger)
|
wait_for_database_available(logger)
|
||||||
|
|
||||||
|
|
||||||
@enable_logging(logging.DEBUG)
|
@enable_logging(logging.DEBUG)
|
||||||
def exclude(logger):
|
def exclude(logger):
|
||||||
# get all processes' network addresses
|
# get all processes' network addresses
|
||||||
|
@ -380,7 +404,7 @@ def exclude(logger):
|
||||||
# randomly pick one and exclude the process
|
# randomly pick one and exclude the process
|
||||||
excluded_address = random.choice(addresses)
|
excluded_address = random.choice(addresses)
|
||||||
# If we see "not enough space" error, use FORCE option to proceed
|
# If we see "not enough space" error, use FORCE option to proceed
|
||||||
# this should be a safe operation as we do not need any storage space for the test
|
# this should be a safe operation as we do not need any storage space for the test
|
||||||
force = False
|
force = False
|
||||||
# sometimes we need to retry the exclude
|
# sometimes we need to retry the exclude
|
||||||
while True:
|
while True:
|
||||||
|
@ -417,6 +441,8 @@ def exclude(logger):
|
||||||
wait_for_database_available(logger)
|
wait_for_database_available(logger)
|
||||||
|
|
||||||
# read the system key 'k', need to enable the option first
|
# read the system key 'k', need to enable the option first
|
||||||
|
|
||||||
|
|
||||||
def read_system_key(k):
|
def read_system_key(k):
|
||||||
output = run_fdbcli_command('option', 'on', 'READ_SYSTEM_KEYS;', 'get', k)
|
output = run_fdbcli_command('option', 'on', 'READ_SYSTEM_KEYS;', 'get', k)
|
||||||
if 'is' not in output:
|
if 'is' not in output:
|
||||||
|
@ -425,11 +451,14 @@ def read_system_key(k):
|
||||||
_, value = output.split(' is ')
|
_, value = output.split(' is ')
|
||||||
return value
|
return value
|
||||||
|
|
||||||
|
|
||||||
@enable_logging()
|
@enable_logging()
|
||||||
def throttle(logger):
|
def throttle(logger):
|
||||||
# no throttled tags at the beginning
|
# no throttled tags at the beginning
|
||||||
no_throttle_tags_output = 'There are no throttled tags'
|
no_throttle_tags_output = 'There are no throttled tags'
|
||||||
assert run_fdbcli_command('throttle', 'list') == no_throttle_tags_output
|
output = run_fdbcli_command('throttle', 'list')
|
||||||
|
logger.debug(output)
|
||||||
|
assert output == no_throttle_tags_output
|
||||||
# test 'throttle enable auto'
|
# test 'throttle enable auto'
|
||||||
run_fdbcli_command('throttle', 'enable', 'auto')
|
run_fdbcli_command('throttle', 'enable', 'auto')
|
||||||
# verify the change is applied by reading the system key
|
# verify the change is applied by reading the system key
|
||||||
|
@ -442,6 +471,7 @@ def throttle(logger):
|
||||||
assert enable_flag == "`0'"
|
assert enable_flag == "`0'"
|
||||||
# TODO : test manual throttling, not easy to do now
|
# TODO : test manual throttling, not easy to do now
|
||||||
|
|
||||||
|
|
||||||
def wait_for_database_available(logger):
|
def wait_for_database_available(logger):
|
||||||
# sometimes the change takes some time to have effect and the database can be unavailable at that time
|
# sometimes the change takes some time to have effect and the database can be unavailable at that time
|
||||||
# this is to wait until the database is available again
|
# this is to wait until the database is available again
|
||||||
|
@ -449,30 +479,81 @@ def wait_for_database_available(logger):
|
||||||
logger.debug("Database unavailable for now, wait for one second")
|
logger.debug("Database unavailable for now, wait for one second")
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
|
|
||||||
|
@enable_logging()
|
||||||
|
def profile(logger):
|
||||||
|
# profile list should return the same list as kill
|
||||||
|
addresses = get_fdb_process_addresses(logger)
|
||||||
|
output1 = run_fdbcli_command('profile', 'list')
|
||||||
|
assert output1.split('\n') == addresses
|
||||||
|
# check default output
|
||||||
|
default_profile_client_get_output = 'Client profiling rate is set to default and size limit is set to default.'
|
||||||
|
output2 = run_fdbcli_command('profile', 'client', 'get')
|
||||||
|
assert output2 == default_profile_client_get_output
|
||||||
|
# set rate and size limit
|
||||||
|
run_fdbcli_command('profile', 'client', 'set', '0.5', '1GB')
|
||||||
|
output3 = run_fdbcli_command('profile', 'client', 'get')
|
||||||
|
logger.debug(output3)
|
||||||
|
output3_list = output3.split(' ')
|
||||||
|
assert float(output3_list[6]) == 0.5
|
||||||
|
# size limit should be 1GB
|
||||||
|
assert output3_list[-1] == '1000000000.'
|
||||||
|
# change back to default value and check
|
||||||
|
run_fdbcli_command('profile', 'client', 'set', 'default', 'default')
|
||||||
|
assert run_fdbcli_command('profile', 'client', 'get') == default_profile_client_get_output
|
||||||
|
|
||||||
|
|
||||||
|
@enable_logging()
|
||||||
|
def triggerddteaminfolog(logger):
|
||||||
|
# this command is straightforward and only has one code path
|
||||||
|
output = run_fdbcli_command('triggerddteaminfolog')
|
||||||
|
assert output == 'Triggered team info logging in data distribution.'
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
# fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>
|
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
|
||||||
assert len(sys.argv) == 4, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>"
|
description="""
|
||||||
|
The test calls fdbcli commands through fdbcli --exec "<command>" interactively using subprocess.
|
||||||
|
The outputs from fdbcli are returned and compared to predefined results.
|
||||||
|
Consequently, changing fdbcli outputs or breaking any commands will casue the test to fail.
|
||||||
|
Commands that are easy to test will run against a single process cluster.
|
||||||
|
For complex commands like exclude, they will run against a cluster with multiple(current set to 5) processes.
|
||||||
|
If external_client_library is given, we will disable the local client and use the external client to run fdbcli.
|
||||||
|
""")
|
||||||
|
parser.add_argument('build_dir', metavar='BUILD_DIRECTORY', help='FDB build directory')
|
||||||
|
parser.add_argument('cluster_file', metavar='CLUSTER_FILE', help='FDB cluster file')
|
||||||
|
parser.add_argument('process_number', nargs='?', metavar='PROCESS_NUMBER', help="Number of fdb processes", type=int, default=1)
|
||||||
|
parser.add_argument('--external-client-library', '-e', metavar='EXTERNAL_CLIENT_LIBRARY_PATH', help="External client library path")
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# keep current environment variables
|
||||||
|
fdbcli_env = os.environ.copy()
|
||||||
|
# set external client library if provided
|
||||||
|
if args.external_client_library:
|
||||||
|
# disable local client and use the external client library
|
||||||
|
fdbcli_env['FDB_NETWORK_OPTION_DISABLE_LOCAL_CLIENT'] = ''
|
||||||
|
fdbcli_env['FDB_NETWORK_OPTION_EXTERNAL_CLIENT_LIBRARY'] = args.external_client_library
|
||||||
|
|
||||||
# shell command template
|
# shell command template
|
||||||
command_template = [sys.argv[1], '-C', sys.argv[2], '--exec']
|
command_template = [args.build_dir + '/bin/fdbcli', '-C', args.cluster_file, '--exec']
|
||||||
# tests for fdbcli commands
|
# tests for fdbcli commands
|
||||||
# assertions will fail if fdbcli does not work as expected
|
# assertions will fail if fdbcli does not work as expected
|
||||||
process_number = int(sys.argv[3])
|
if args.process_number == 1:
|
||||||
if process_number == 1:
|
|
||||||
# TODO: disable for now, the change can cause the database unavailable
|
# TODO: disable for now, the change can cause the database unavailable
|
||||||
#advanceversion()
|
# advanceversion()
|
||||||
cache_range()
|
cache_range()
|
||||||
consistencycheck()
|
consistencycheck()
|
||||||
datadistribution()
|
datadistribution()
|
||||||
kill()
|
kill()
|
||||||
lockAndUnlock()
|
lockAndUnlock()
|
||||||
maintenance()
|
maintenance()
|
||||||
setclass()
|
profile()
|
||||||
suspend()
|
suspend()
|
||||||
transaction()
|
transaction()
|
||||||
throttle()
|
throttle()
|
||||||
|
triggerddteaminfolog()
|
||||||
else:
|
else:
|
||||||
assert process_number > 1, "Process number should be positive"
|
assert args.process_number > 1, "Process number should be positive"
|
||||||
coordinators()
|
coordinators()
|
||||||
exclude()
|
exclude()
|
||||||
|
setclass()
|
||||||
|
|
||||||
|
|
|
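The argparse rework above also threads `fdbcli_env` into every subprocess call; that is the hook for the new `--external-client-library` option. Below is a minimal standalone sketch of the same pattern, assuming only that an fdbcli binary and a cluster file exist at the placeholder paths (the two FDB_NETWORK_OPTION variable names are taken verbatim from the patch; everything else is illustrative):

```
import os
import subprocess

# Copy the parent environment, then route fdbcli through an external client
# library by setting the FDB_NETWORK_OPTION_* environment variables.
fdbcli_env = os.environ.copy()
fdbcli_env['FDB_NETWORK_OPTION_DISABLE_LOCAL_CLIENT'] = ''
fdbcli_env['FDB_NETWORK_OPTION_EXTERNAL_CLIENT_LIBRARY'] = '/path/to/libfdb_c.so'  # placeholder

# Placeholder binary and cluster-file paths; mirrors command_template in the test.
command_template = ['./build/bin/fdbcli', '-C', './fdb.cluster', '--exec']
result = subprocess.run(command_template + ['status minimal'],
                        env=fdbcli_env, capture_output=True, text=True)
print(result.stdout)
```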
@@ -128,7 +128,8 @@ function(add_fdb_test)
                   -n ${test_name}
                   -b ${PROJECT_BINARY_DIR}
                   -t ${test_type}
                   -O ${OLD_FDBSERVER_BINARY}
+                  --config "@CTEST_CONFIGURATION_TYPE@"
                   --crash
                   --aggregate-traces ${TEST_AGGREGATE_TRACES}
                   --log-format ${TEST_LOG_FORMAT}
@@ -442,6 +443,40 @@ function(add_fdbclient_test)
   set_tests_properties("${T_NAME}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
 endfunction()
 
+# Creates a cluster file for a nonexistent cluster before running the specified command
+# (usually a ctest test)
+function(add_unavailable_fdbclient_test)
+  set(options DISABLED ENABLED)
+  set(oneValueArgs NAME TEST_TIMEOUT)
+  set(multiValueArgs COMMAND)
+  cmake_parse_arguments(T "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
+  if(OPEN_FOR_IDE)
+    return()
+  endif()
+  if(NOT T_ENABLED AND T_DISABLED)
+    return()
+  endif()
+  if(NOT T_NAME)
+    message(FATAL_ERROR "NAME is a required argument for add_unavailable_fdbclient_test")
+  endif()
+  if(NOT T_COMMAND)
+    message(FATAL_ERROR "COMMAND is a required argument for add_unavailable_fdbclient_test")
+  endif()
+  message(STATUS "Adding unavailable client test ${T_NAME}")
+  add_test(NAME "${T_NAME}"
+    COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/fake_cluster.py
+            --output-dir ${CMAKE_BINARY_DIR}
+            --
+            ${T_COMMAND})
+  if (T_TEST_TIMEOUT)
+    set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT ${T_TEST_TIMEOUT})
+  else()
+    # default timeout
+    set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
+  endif()
+  set_tests_properties("${T_NAME}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
+endfunction()
+
 # Creates 3 distinct clusters before running the specified command.
 # This is useful for testing features that require multiple clusters (like the
 # multi-cluster FDB client)
@@ -1,6 +1,6 @@
 # FindRocksDB
 
-find_package(RocksDB)
+find_package(RocksDB 6.22.1)
 
 include(ExternalProject)
 
@@ -55,7 +55,7 @@ find_package(Python COMPONENTS Interpreter)
 if(Python_Interpreter_FOUND)
   set(WITH_PYTHON ON)
 else()
-  #message(FATAL_ERROR "Could not found a suitable python interpreter")
+  message(WARNING "Could not find a suitable python interpreter")
   set(WITH_PYTHON OFF)
 endif()
 
@@ -66,7 +66,7 @@ else()
   if(WITH_PYTHON)
     set(WITH_PYTHON_BINDING ON)
   else()
-    #message(FATAL_ERROR "Could not found a suitable python interpreter")
+    message(WARNING "Python binding depends on Python, but a python interpreter is not found")
     set(WITH_PYTHON_BINDING OFF)
   endif()
 endif()
@@ -76,6 +76,9 @@ endif()
 ################################################################################
 
 option(BUILD_C_BINDING "build C binding" ON)
+if(BUILD_C_BINDING AND NOT WITH_PYTHON)
+  message(WARNING "C binding depends on Python, but a python interpreter is not found")
+endif()
 if(BUILD_C_BINDING AND WITH_PYTHON)
   set(WITH_C_BINDING ON)
 else()
@@ -87,6 +90,9 @@ endif()
 ################################################################################
 
 option(BUILD_JAVA_BINDING "build java binding" ON)
+if(BUILD_JAVA_BINDING AND NOT WITH_C_BINDING)
+  message(WARNING "Java binding depends on C binding, but C binding is not enabled")
+endif()
 if(NOT BUILD_JAVA_BINDING OR NOT WITH_C_BINDING)
   set(WITH_JAVA_BINDING OFF)
 else()
@@ -120,6 +126,9 @@ endif()
 ################################################################################
 
 option(BUILD_GO_BINDING "build go binding" ON)
+if(BUILD_GO_BINDING AND NOT WITH_C_BINDING)
+  message(WARNING "Go binding depends on C binding, but C binding is not enabled")
+endif()
 if(NOT BUILD_GO_BINDING OR NOT BUILD_C_BINDING)
   set(WITH_GO_BINDING OFF)
 else()
@@ -141,6 +150,9 @@ endif()
 ################################################################################
 
 option(BUILD_RUBY_BINDING "build ruby binding" ON)
+if(BUILD_RUBY_BINDING AND NOT WITH_C_BINDING)
+  message(WARNING "Ruby binding depends on C binding, but C binding is not enabled")
+endif()
 if(NOT BUILD_RUBY_BINDING OR NOT BUILD_C_BINDING)
   set(WITH_RUBY_BINDING OFF)
 else()
@@ -4,5 +4,20 @@ find_path(ROCKSDB_INCLUDE_DIR
   NAMES rocksdb/db.h
   PATH_SUFFIXES include)
 
+if(ROCKSDB_INCLUDE_DIR AND EXISTS "${ROCKSDB_INCLUDE_DIR}/rocksdb/version.h")
+  foreach(ver "MAJOR" "MINOR" "PATCH")
+    file(STRINGS "${ROCKSDB_INCLUDE_DIR}/rocksdb/version.h" ROCKSDB_VER_${ver}_LINE
+         REGEX "^#define[ \t]+ROCKSDB_${ver}[ \t]+[0-9]+$")
+    string(REGEX REPLACE "^#define[ \t]+ROCKSDB_${ver}[ \t]+([0-9]+)$"
+           "\\1" ROCKSDB_VERSION_${ver} "${ROCKSDB_VER_${ver}_LINE}")
+    unset(${ROCKSDB_VER_${ver}_LINE})
+  endforeach()
+  set(ROCKSDB_VERSION_STRING
+      "${ROCKSDB_VERSION_MAJOR}.${ROCKSDB_VERSION_MINOR}.${ROCKSDB_VERSION_PATCH}")
+
+  message(STATUS "Found RocksDB version: ${ROCKSDB_VERSION_STRING}")
+endif()
+
 find_package_handle_standard_args(RocksDB
-  DEFAULT_MSG ROCKSDB_INCLUDE_DIR)
+  REQUIRED_VARS ROCKSDB_INCLUDE_DIR
+  VERSION_VAR ROCKSDB_VERSION_STRING)
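The CMake module above discovers the installed RocksDB version by regex-matching the `#define ROCKSDB_MAJOR/MINOR/PATCH` lines in `rocksdb/version.h`, so `find_package(RocksDB 6.22.1)` can enforce a minimum version. Here is the same extraction logic as a short Python sketch (the header path is a placeholder), handy for checking the regex against a local header:

```
import re

# Placeholder path; point this at an actual RocksDB header to try it.
version_header = "/usr/include/rocksdb/version.h"

version = {}
with open(version_header) as f:
    for line in f:
        # same pattern the CMake module uses: "#define ROCKSDB_<PART> <number>"
        m = re.match(r"^#define[ \t]+ROCKSDB_(MAJOR|MINOR|PATCH)[ \t]+([0-9]+)$",
                     line.rstrip("\n"))
        if m:
            version[m.group(1)] = m.group(2)

print("{MAJOR}.{MINOR}.{PATCH}".format(**version))  # e.g. 6.22.1
```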
@@ -155,17 +155,35 @@ list(GET FDB_VERSION_LIST 2 FDB_PATCH)
 # Alternatives config
 ################################################################################
 
+set(mv_packaging_dir ${PROJECT_SOURCE_DIR}/packaging/multiversion)
 math(EXPR ALTERNATIVES_PRIORITY "(${PROJECT_VERSION_MAJOR} * 1000) + (${PROJECT_VERSION_MINOR} * 100) + ${PROJECT_VERSION_PATCH}")
 set(script_dir "${PROJECT_BINARY_DIR}/packaging/multiversion/")
 file(MAKE_DIRECTORY "${script_dir}/server" "${script_dir}/clients")
-configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/server/postinst" "${script_dir}/server" @ONLY)
-configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/server/prerm" "${script_dir}/server" @ONLY)
-set(LIB_DIR lib)
-configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients" @ONLY)
-set(LIB_DIR lib64)
-configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
-configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/prerm" "${script_dir}/clients" @ONLY)
 
+# Needs to be named postinst for debian
+configure_file("${mv_packaging_dir}/server/postinst-deb" "${script_dir}/server/postinst" @ONLY)
+
+configure_file("${mv_packaging_dir}/server/postinst-rpm" "${script_dir}/server" @ONLY)
+configure_file("${mv_packaging_dir}/server/prerm" "${script_dir}/server" @ONLY)
+set(LIB_DIR lib)
+configure_file("${mv_packaging_dir}/clients/postinst" "${script_dir}/clients" @ONLY)
+set(LIB_DIR lib64)
+configure_file("${mv_packaging_dir}/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
+configure_file("${mv_packaging_dir}/clients/prerm" "${script_dir}/clients" @ONLY)
+
+# make sure all directories we need exist
+file(MAKE_DIRECTORY "${script_dir}/clients/usr/lib/foundationdb")
+install(DIRECTORY "${script_dir}/clients/usr/lib/foundationdb"
+  DESTINATION usr/lib
+  COMPONENT clients-versioned)
+file(MAKE_DIRECTORY "${script_dir}/clients/usr/lib/pkgconfig")
+install(DIRECTORY "${script_dir}/clients/usr/lib/pkgconfig"
+  DESTINATION usr/lib
+  COMPONENT clients-versioned)
+file(MAKE_DIRECTORY "${script_dir}/clients/usr/lib/cmake")
+install(DIRECTORY "${script_dir}/clients/usr/lib/cmake"
+  DESTINATION usr/lib
+  COMPONENT clients-versioned)
+
 ################################################################################
 # Move Docker Setup
@@ -196,16 +214,18 @@ set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7)
 set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
 set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
 set(CPACK_COMPONENT_SERVER-VERSIONED_DEPENDS clients-versioned)
+set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_REQUIRES
+    "foundationdb${PROJECT_VERSION}-clients")
 
 set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server")
-set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb-server-${PROJECT_VERSION}")
+set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb${PROJECT_VERSION}-server")
 
 set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
-set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb-clients-${PROJECT_VERSION}")
+set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb${PROJECT_VERSION}-clients")
 
 
 # MacOS needs a file extension for the LICENSE file
@@ -226,14 +246,21 @@ else()
   set(prerelease_string "-1")
 endif()
 
+#############
+# Filenames #
+#############
+set(unversioned_postfix "${PROJECT_VERSION}${prerelease_string}")
 # RPM filenames
-set(rpm-clients-filename "foundationdb-clients-${PROJECT_VERSION}${prerelease_string}")
-set(rpm-server-filename "foundationdb-server-${PROJECT_VERSION}${prerelease_string}")
+set(rpm-clients-filename "foundationdb-clients-${unversioned_postfix}")
+set(rpm-server-filename "foundationdb-server-${unversioned_postfix}")
+set(rpm-clients-versioned-filename "foundationdb${PROJECT_VERSION}-clients${prerelease_string}")
+set(rpm-server-versioned-filename "foundationdb${PROJECT_VERSION}-server${prerelease_string}")
 
 # Deb filenames
-set(deb-clients-filename "foundationdb-clients_${PROJECT_VERSION}${prerelease_string}")
-set(deb-server-filename "foundationdb-server_${PROJECT_VERSION}${prerelease_string}")
+set(deb-clients-filename "foundationdb-clients_${unversioned_postfix}")
+set(deb-server-filename "foundationdb-server_${unversioned_postfix}")
+set(deb-clients-versioned-filename "foundationdb${PROJECT_VERSION}-clients${prerelease_string}")
+set(deb-server-versioned-filename "foundationdb${PROJECT_VERSION}-server${prerelease_string}")
 
 ################################################################################
 # Configuration for RPM
@@ -244,17 +271,18 @@ set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0")
 set(CPACK_RPM_PACKAGE_NAME "foundationdb")
 set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients")
 set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server")
-set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
+set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-server")
+set(CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-clients")
 
 set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${rpm-clients-filename}.el7.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${rpm-clients-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${rpm-clients-versioned-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
 set(CPACK_RPM_SERVER-EL7_FILE_NAME "${rpm-server-filename}.el7.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${rpm-server-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${rpm-server-versioned-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
 
 set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.el7-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-clients-versioned-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
 set(CPACK_RPM_SERVER-EL7_DEBUGINFO_FILE_NAME "${rpm-server-filename}.el7-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-server-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-server-versioned-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
 
 file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir")
 fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server)
@@ -281,7 +309,10 @@ set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
   "/lib"
   "/lib/systemd"
   "/lib/systemd/system"
-  "/etc/rc.d/init.d")
+  "/etc/rc.d/init.d"
+  "/usr/lib/pkgconfig"
+  "/usr/lib/foundationdb"
+  "/usr/lib/cmake")
 set(CPACK_RPM_DEBUGINFO_PACKAGE ${GENERATE_DEBUG_PACKAGES})
 #set(CPACK_RPM_BUILD_SOURCE_FDB_INSTALL_DIRS_PREFIX /usr/src)
 set(CPACK_RPM_COMPONENT_INSTALL ON)
@@ -305,7 +336,7 @@ set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES
   "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
 
 set(CPACK_RPM_SERVER-VERSIONED_POST_INSTALL_SCRIPT_FILE
-  ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst)
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst-rpm)
 
 set(CPACK_RPM_SERVER-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE
   ${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm)
@@ -323,13 +354,13 @@ set(CPACK_RPM_CLIENTS-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE
 if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
   set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${deb-clients-filename}_amd64.deb")
   set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${deb-server-filename}_amd64.deb")
-  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-filename}.versioned_amd64.deb")
-  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-filename}.versioned_amd64.deb")
+  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-versioned-filename}.versioned_amd64.deb")
+  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-versioned-filename}.versioned_amd64.deb")
 else()
   set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${deb-clients-filename}_${CMAKE_SYSTEM_PROCESSOR}.deb")
   set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${deb-server-filename}_${CMAKE_SYSTEM_PROCESSOR}.deb")
-  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
-  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
+  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-versioned-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
+  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-versioned-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
 endif()
 
 set(CPACK_DEB_COMPONENT_INSTALL ON)
@@ -339,8 +370,8 @@ set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
 
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_NAME "foundationdb-server")
 set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_NAME "foundationdb-clients")
-set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
-set(CPACK_DEBIAN_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb-clients-${PROJECT_VERSION}")
+set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-server")
+set(CPACK_DEBIAN_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-clients")
 
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), foundationdb-clients (= ${FDB_VERSION})")
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_RECOMMENDS "python (>= 2.6)")
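The renaming above moves the version from the package suffix into the package name itself for the "versioned" components (for example `foundationdb7.1.0-clients` rather than `foundationdb-clients-7.1.0`), presumably so that several versions can be installed side by side. A small Python sketch of the naming scheme implied by the templates above, with example version and prerelease values:

```
version = "7.1.0"   # example PROJECT_VERSION
prerelease = "-1"   # example prerelease_string for a release build

# unversioned package: only one "foundationdb-clients" can be installed
rpm_clients = f"foundationdb-clients-{version}{prerelease}"
# versioned package: the version is part of the package *name* itself
rpm_clients_versioned = f"foundationdb{version}-clients{prerelease}"

print(rpm_clients)            # foundationdb-clients-7.1.0-1
print(rpm_clients_versioned)  # foundationdb7.1.0-clients-1
```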
@@ -308,9 +308,16 @@ namespace SummarizeTest
                 string lastFolderName = Path.GetFileName(Path.GetDirectoryName(testFile));
                 if (lastFolderName.Contains("from_") || lastFolderName.Contains("to_")) // Only perform upgrade/downgrade tests from certain versions
                 {
-                    oldBinaryVersionLowerBound = lastFolderName.Split('_').Last();
+                    oldBinaryVersionLowerBound = lastFolderName.Split('_').ElementAt(1); // Assuming "from_*.*.*" appears first in the folder name
                 }
                 string oldBinaryVersionUpperBound = getFdbserverVersion(fdbserverName);
+                if (lastFolderName.Contains("until_")) // Specify upper bound for old binary; "until_*.*.*" is assumed at the end if present
+                {
+                    string givenUpperBound = lastFolderName.Split('_').Last();
+                    if (versionLessThan(givenUpperBound, oldBinaryVersionUpperBound)) {
+                        oldBinaryVersionUpperBound = givenUpperBound;
+                    }
+                }
                 if (versionGreaterThanOrEqual("4.0.0", oldBinaryVersionUpperBound)) {
                     // If the binary under test is from 3.x, then allow upgrade tests from 3.x binaries.
                     oldBinaryVersionLowerBound = "0.0.0";
@@ -320,8 +327,22 @@ namespace SummarizeTest
                     Directory.GetFiles(oldBinaryFolder),
                     x => versionGreaterThanOrEqual(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionLowerBound)
                     && versionLessThan(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionUpperBound));
-                oldBinaries = oldBinaries.Concat(currentBinary);
-                oldServerName = random.Choice(oldBinaries.ToList<string>());
+                if (!lastFolderName.Contains("until_")) {
+                    // Only add the current binary to the list of old binaries if "until_" is not specified in the folder name.
+                    // <version> in until_<version> should be less than or equal to the current binary version;
+                    // otherwise, using "until_" makes no sense.
+                    // Thus, by definition, if "until_" appears, we do not want to run with the current binary version.
+                    oldBinaries = oldBinaries.Concat(currentBinary);
+                }
+                List<string> oldBinariesList = oldBinaries.ToList<string>();
+                if (oldBinariesList.Count == 0) {
+                    // In theory, restarting tests are named to have at least one old binary version to run.
+                    // But if none of the provided old binaries fall in the range, we just skip the test.
+                    Console.WriteLine("No available old binary version from {0} to {1}", oldBinaryVersionLowerBound, oldBinaryVersionUpperBound);
+                    return 0;
+                } else {
+                    oldServerName = random.Choice(oldBinariesList);
+                }
             }
             else
             {
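The folder-name convention above ("from_X.Y.Z", optionally ending in "until_A.B.C") defines a half-open version range for candidate old binaries, with the binary under test joining the pool only when no "until_" cap is given. A Python sketch of that selection rule, to make the semantics concrete (the helper function and folder name are illustrative, not part of the patch):

```
from packaging.version import Version  # pip install packaging

def pick_candidates(folder: str, current: str, old_binaries: dict) -> list:
    """old_binaries maps version string -> path, e.g. {'6.2.30': 'fdbserver-6.2.30'}."""
    parts = folder.split('_')
    lower = parts[1] if 'from' in parts else '0.0.0'     # "from_" sets the lower bound
    upper = parts[-1] if 'until' in parts else current   # "until_" caps the upper bound
    chosen = [path for ver, path in old_binaries.items()
              if Version(lower) <= Version(ver) < Version(upper)]
    if 'until' not in parts:
        chosen.append('current')  # the binary under test also participates
    return chosen

print(pick_candidates('from_6.2.0_until_7.0.0', '7.1.0',
                      {'6.2.30': 'fdbserver-6.2.30', '7.0.0': 'fdbserver-7.0.0'}))
# ['fdbserver-6.2.30']
```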
@@ -0,0 +1,58 @@
+# pkg_tester
+
+This is a test suite that can be used to validate properties of generated package files.
+
+To use it, first build the package files as described in the main [README](https://github.com/apple/foundationdb#linux).
+
+Then set up a virtualenv:
+
+```
+$ python3 -m venv .venv
+$ source .venv/bin/activate
+$ pip install -r requirements.txt
+```
+
+Then you can run the tests with pytest:
+
+```
+$ BUILDDIR=<BUILDDIR> python -m pytest -s -v
+```
+
+These are snapshot tests, so you may need to update the snapshots with
+
+```
+$ BUILDDIR=<BUILDDIR> python -m pytest -s -v --snapshot-update
+```
+
+Use discretion about whether or not the behavior change is acceptable.
+
+A helpful tip for debugging: if you run pytest with `--pdb`, it will pause
+the tests at the first error, which gives you a chance to run some `docker exec`
+commands to try and see what's wrong.
+
+There's a small chance that this will leak an image (especially if you interrupt the test with ctrl-c). Consider running
+
+```
+$ docker image prune
+```
+
+after. If you `kill -9` the test, there might even be leaked containers. You can destroy all existing containers with
+
+```
+$ docker rm -f $(docker ps -a -q) # destroy all docker containers!
+```
+
+# Requirements
+
+docker, python
+
+# Future work?
+
+- [x] Test rpms
+- [x] Test debs
+- [x] Test versioned packages
+- [ ] Test that upgrades preserve data/config
+
+# Development
+
+Please run `black` and `mypy` after making changes.
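For readers unfamiliar with snapshot testing: with syrupy (pinned in the requirements.txt that follows), a test simply asserts against a `snapshot` fixture, `--snapshot-update` (re)records the expected value, and the `.ambr` file below stores those recorded values. A minimal sketch, with a stand-in function instead of a real fdbcli call inside a container:

```
# test_example.py -- minimal syrupy-style snapshot test (illustrative only)
def fdbcli_get_x() -> str:
    return "`x' is `y'\n"  # stand-in for running fdbcli inside a container

def test_get_x(snapshot):
    # a first run with --snapshot-update records the value under __snapshots__/;
    # subsequent runs fail if the output drifts from the recorded snapshot
    assert snapshot == fdbcli_get_x()
```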
@@ -0,0 +1,244 @@
+# name: test_backup_restore[centos-versioned]
+  '
+  Submitted and now waiting for the backup on tag `default' to complete.
+
+  '
+---
+# name: test_backup_restore[centos-versioned].1
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_backup_restore[centos]
+  '
+  Submitted and now waiting for the backup on tag `default' to complete.
+
+  '
+---
+# name: test_backup_restore[centos].1
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_backup_restore[ubuntu-versioned]
+  '
+  Submitted and now waiting for the backup on tag `default' to complete.
+
+  '
+---
+# name: test_backup_restore[ubuntu-versioned].1
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_backup_restore[ubuntu]
+  '
+  Submitted and now waiting for the backup on tag `default' to complete.
+
+  '
+---
+# name: test_backup_restore[ubuntu].1
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_fdbcli_help_text[centos-versioned]
+  '
+  FoundationDB CLI 7.1 (v7.1.0)
+  usage: fdbcli [OPTIONS]
+
+  -C CONNFILE    The path of a file containing the connection string for the
+                 FoundationDB cluster. The default is first the value of the
+                 FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
+                 then `/etc/foundationdb/fdb.cluster'.
+  --log          Enables trace file logging for the CLI session.
+  --log-dir PATH Specifes the output directory for trace files. If
+                 unspecified, defaults to the current directory. Has
+                 no effect unless --log is specified.
+  --trace_format FORMAT
+                 Select the format of the log files. xml (the default) and json
+                 are supported. Has no effect unless --log is specified.
+  --exec CMDS    Immediately executes the semicolon separated CLI commands
+                 and then exits.
+  --no-status    Disables the initial status check done when starting
+                 the CLI.
+  --tls_certificate_file CERTFILE
+                 The path of a file containing the TLS certificate and CA
+                 chain.
+  --tls_ca_file CERTAUTHFILE
+                 The path of a file containing the CA certificates chain.
+  --tls_key_file KEYFILE
+                 The path of a file containing the private key corresponding
+                 to the TLS certificate.
+  --tls_password PASSCODE
+                 The passphrase of encrypted private key
+  --tls_verify_peers CONSTRAINTS
+                 The constraints by which to validate TLS peers. The contents
+                 and format of CONSTRAINTS are plugin-specific.
+  --knob_KNOBNAME KNOBVALUE
+                 Changes a knob option. KNOBNAME should be lowercase.
+  --debug-tls    Prints the TLS configuration and certificate chain, then exits.
+                 Useful in reporting and diagnosing TLS issues.
+  --build_flags  Print build information and exit.
+  -v, --version  Print FoundationDB CLI version information and exit.
+  -h, --help     Display this help and exit.
+
+  '
+---
+# name: test_fdbcli_help_text[centos]
+  '
+  FoundationDB CLI 7.1 (v7.1.0)
+  usage: fdbcli [OPTIONS]
+
+  -C CONNFILE    The path of a file containing the connection string for the
+                 FoundationDB cluster. The default is first the value of the
+                 FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
+                 then `/etc/foundationdb/fdb.cluster'.
+  --log          Enables trace file logging for the CLI session.
+  --log-dir PATH Specifes the output directory for trace files. If
+                 unspecified, defaults to the current directory. Has
+                 no effect unless --log is specified.
+  --trace_format FORMAT
+                 Select the format of the log files. xml (the default) and json
+                 are supported. Has no effect unless --log is specified.
+  --exec CMDS    Immediately executes the semicolon separated CLI commands
+                 and then exits.
+  --no-status    Disables the initial status check done when starting
+                 the CLI.
+  --tls_certificate_file CERTFILE
+                 The path of a file containing the TLS certificate and CA
+                 chain.
+  --tls_ca_file CERTAUTHFILE
+                 The path of a file containing the CA certificates chain.
+  --tls_key_file KEYFILE
+                 The path of a file containing the private key corresponding
+                 to the TLS certificate.
+  --tls_password PASSCODE
+                 The passphrase of encrypted private key
+  --tls_verify_peers CONSTRAINTS
+                 The constraints by which to validate TLS peers. The contents
+                 and format of CONSTRAINTS are plugin-specific.
+  --knob_KNOBNAME KNOBVALUE
+                 Changes a knob option. KNOBNAME should be lowercase.
+  --debug-tls    Prints the TLS configuration and certificate chain, then exits.
+                 Useful in reporting and diagnosing TLS issues.
+  --build_flags  Print build information and exit.
+  -v, --version  Print FoundationDB CLI version information and exit.
+  -h, --help     Display this help and exit.
+
+  '
+---
+# name: test_fdbcli_help_text[ubuntu-versioned]
+  '
+  FoundationDB CLI 7.1 (v7.1.0)
+  usage: fdbcli [OPTIONS]
+
+  -C CONNFILE    The path of a file containing the connection string for the
+                 FoundationDB cluster. The default is first the value of the
+                 FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
+                 then `/etc/foundationdb/fdb.cluster'.
+  --log          Enables trace file logging for the CLI session.
+  --log-dir PATH Specifes the output directory for trace files. If
+                 unspecified, defaults to the current directory. Has
+                 no effect unless --log is specified.
+  --trace_format FORMAT
+                 Select the format of the log files. xml (the default) and json
+                 are supported. Has no effect unless --log is specified.
+  --exec CMDS    Immediately executes the semicolon separated CLI commands
+                 and then exits.
+  --no-status    Disables the initial status check done when starting
+                 the CLI.
+  --tls_certificate_file CERTFILE
+                 The path of a file containing the TLS certificate and CA
+                 chain.
+  --tls_ca_file CERTAUTHFILE
+                 The path of a file containing the CA certificates chain.
+  --tls_key_file KEYFILE
+                 The path of a file containing the private key corresponding
+                 to the TLS certificate.
+  --tls_password PASSCODE
+                 The passphrase of encrypted private key
+  --tls_verify_peers CONSTRAINTS
+                 The constraints by which to validate TLS peers. The contents
+                 and format of CONSTRAINTS are plugin-specific.
+  --knob_KNOBNAME KNOBVALUE
+                 Changes a knob option. KNOBNAME should be lowercase.
+  --debug-tls    Prints the TLS configuration and certificate chain, then exits.
+                 Useful in reporting and diagnosing TLS issues.
+  --build_flags  Print build information and exit.
+  -v, --version  Print FoundationDB CLI version information and exit.
+  -h, --help     Display this help and exit.
+
+  '
+---
+# name: test_fdbcli_help_text[ubuntu]
+  '
+  FoundationDB CLI 7.1 (v7.1.0)
+  usage: fdbcli [OPTIONS]
+
+  -C CONNFILE    The path of a file containing the connection string for the
+                 FoundationDB cluster. The default is first the value of the
+                 FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
+                 then `/etc/foundationdb/fdb.cluster'.
+  --log          Enables trace file logging for the CLI session.
+  --log-dir PATH Specifes the output directory for trace files. If
+                 unspecified, defaults to the current directory. Has
+                 no effect unless --log is specified.
+  --trace_format FORMAT
+                 Select the format of the log files. xml (the default) and json
+                 are supported. Has no effect unless --log is specified.
+  --exec CMDS    Immediately executes the semicolon separated CLI commands
+                 and then exits.
+  --no-status    Disables the initial status check done when starting
+                 the CLI.
+  --tls_certificate_file CERTFILE
+                 The path of a file containing the TLS certificate and CA
+                 chain.
+  --tls_ca_file CERTAUTHFILE
+                 The path of a file containing the CA certificates chain.
+  --tls_key_file KEYFILE
+                 The path of a file containing the private key corresponding
+                 to the TLS certificate.
+  --tls_password PASSCODE
+                 The passphrase of encrypted private key
+  --tls_verify_peers CONSTRAINTS
+                 The constraints by which to validate TLS peers. The contents
+                 and format of CONSTRAINTS are plugin-specific.
+  --knob_KNOBNAME KNOBVALUE
+                 Changes a knob option. KNOBNAME should be lowercase.
+  --debug-tls    Prints the TLS configuration and certificate chain, then exits.
+                 Useful in reporting and diagnosing TLS issues.
+  --build_flags  Print build information and exit.
+  -v, --version  Print FoundationDB CLI version information and exit.
+  -h, --help     Display this help and exit.
+
+  '
+---
+# name: test_write[centos-versioned]
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_write[centos]
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_write[ubuntu-versioned]
+  '
+  `x' is `y'
+
+  '
+---
+# name: test_write[ubuntu]
+  '
+  `x' is `y'
+
+  '
+---
@@ -0,0 +1,13 @@
+attrs==21.2.0
+colored==1.4.2
+importlib-metadata==4.0.1
+iniconfig==1.1.1
+packaging==20.9
+pluggy==0.13.1
+py==1.10.0
+pyparsing==2.4.7
+pytest==6.2.4
+syrupy==1.2.3
+toml==0.10.2
+typing-extensions==3.10.0.0
+zipp==3.4.1
@ -0,0 +1,251 @@
|
||||||
|
# test_fdb_pkgs.py
|
||||||
|
#
|
||||||
|
# This source file is part of the FoundationDB open source project
|
||||||
|
#
|
||||||
|
# Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import glob
|
||||||
|
import os
|
||||||
|
import pathlib
|
||||||
|
import pytest
|
||||||
|
import shlex
|
||||||
|
import subprocess
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from typing import Iterator, List, Optional, Union
|
||||||
|
|
||||||
|
|
||||||
|
def run(args: List[str]) -> str:
|
||||||
|
print("$ {}".format(" ".join(map(shlex.quote, args))))
|
||||||
|
result = subprocess.check_output(args).decode("utf-8")
|
||||||
|
print(result, end="")
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
class Image:
|
||||||
|
def __init__(self, uid: str):
|
||||||
|
self.uid = uid
|
||||||
|
|
||||||
|
def dispose(self):
|
||||||
|
run(["docker", "image", "rm", self.uid])
|
||||||
|
|
||||||
|
|
||||||
|
class Container:
|
||||||
|
def __init__(self, image: Union[str, Image], initd=False):
|
||||||
|
if isinstance(image, Image):
|
||||||
|
image_name = image.uid
|
||||||
|
else:
|
||||||
|
assert isinstance(image, str)
|
||||||
|
image_name = image
|
||||||
|
|
||||||
|
# minimal extra args required to run systemd
|
||||||
|
# https://developers.redhat.com/blog/2016/09/13/running-systemd-in-a-non-privileged-container#the_quest
|
||||||
|
extra_initd_args = []
|
||||||
|
if initd:
|
||||||
|
extra_initd_args = "--tmpfs /tmp --tmpfs /run -v /sys/fs/cgroup:/sys/fs/cgroup:ro".split()
|
||||||
|
|
||||||
|
self.uid = str(uuid.uuid4())
|
||||||
|
|
||||||
|
run(
|
||||||
|
["docker", "run"]
|
||||||
|
+ ["-t", "-d", "--name", self.uid]
|
||||||
|
+ extra_initd_args
|
||||||
|
+ [image_name]
|
||||||
|
+ ["/usr/sbin/init" for _ in range(1) if initd]
|
||||||
|
).rstrip()
|
||||||
|
|
||||||
|
def run(self, args: List[str]) -> str:
|
||||||
|
return run(["docker", "exec", self.uid] + args)
|
||||||
|
|
||||||
|
def copy_to(self, src_path: str, dst_path: str) -> None:
|
||||||
|
run(["docker", "cp", src_path, "{}:{}".format(self.uid, dst_path)])
|
||||||
|
|
||||||
|
def commit(self) -> Image:
|
||||||
|
output = run(["docker", "commit", self.uid])
|
||||||
|
uid = output.split(":")[1].rstrip()
|
||||||
|
return Image(uid)
|
||||||
|
|
||||||
|
def dispose(self):
|
||||||
|
run(["docker", "rm", "-f", self.uid])
|
||||||
|
|
||||||
|
|
||||||
|
def ubuntu_image_with_fdb_helper(versioned: bool) -> Iterator[Optional[Image]]:
|
||||||
|
"""
|
||||||
|
Return an image which has just the fdb deb packages installed.
|
||||||
|
"""
|
||||||
|
builddir = os.environ.get("BUILDDIR")
|
||||||
|
    if builddir is None:
        assert False, "BUILDDIR environment variable not set"
    debs = [
        deb
        for deb in glob.glob(os.path.join(builddir, "packages", "*.deb"))
        if ("versioned" in deb) == versioned
    ]
    if not debs:
        yield None
        return

    container = None
    image = None
    try:
        container = Container("ubuntu")
        for deb in debs:
            container.copy_to(deb, "/opt")
        container.run(["bash", "-c", "dpkg -i /opt/*.deb"])
        container.run(["bash", "-c", "rm /opt/*.deb"])
        image = container.commit()
        yield image
    finally:
        if container is not None:
            container.dispose()
        if image is not None:
            image.dispose()


@pytest.fixture(scope="session")
def ubuntu_image_with_fdb() -> Iterator[Optional[Image]]:
    yield from ubuntu_image_with_fdb_helper(versioned=False)


@pytest.fixture(scope="session")
def ubuntu_image_with_fdb_versioned() -> Iterator[Optional[Image]]:
    yield from ubuntu_image_with_fdb_helper(versioned=True)


def centos_image_with_fdb_helper(versioned: bool) -> Iterator[Optional[Image]]:
    """
    Return an image which has just the fdb rpm packages installed.
    """
    builddir = os.environ.get("BUILDDIR")
    if builddir is None:
        assert False, "BUILDDIR environment variable not set"
    rpms = [
        rpm
        for rpm in glob.glob(os.path.join(builddir, "packages", "*.rpm"))
        if ("versioned" in rpm) == versioned
    ]
    if not rpms:
        yield None
        return

    container = None
    image = None
    try:
        container = Container("centos", initd=True)
        for rpm in rpms:
            container.copy_to(rpm, "/opt")
        container.run(["bash", "-c", "yum install -y /opt/*.rpm"])
        container.run(["bash", "-c", "rm /opt/*.rpm"])
        image = container.commit()
        yield image
    finally:
        if container is not None:
            container.dispose()
        if image is not None:
            image.dispose()


@pytest.fixture(scope="session")
def centos_image_with_fdb() -> Iterator[Optional[Image]]:
    yield from centos_image_with_fdb_helper(versioned=False)


@pytest.fixture(scope="session")
def centos_image_with_fdb_versioned() -> Iterator[Optional[Image]]:
    yield from centos_image_with_fdb_helper(versioned=True)


def pytest_generate_tests(metafunc):
    if "linux_container" in metafunc.fixturenames:
        metafunc.parametrize(
            "linux_container",
            ["ubuntu", "centos", "ubuntu-versioned", "centos-versioned"],
            indirect=True,
        )


@pytest.fixture()
def linux_container(
    request,
    ubuntu_image_with_fdb,
    centos_image_with_fdb,
    ubuntu_image_with_fdb_versioned,
    centos_image_with_fdb_versioned,
) -> Iterator[Container]:
    """
    Tests which accept this fixture will be run once for each supported platform, for each type of package (versioned or unversioned).
    """
    container: Optional[Container] = None
    try:
        if request.param == "ubuntu":
            if ubuntu_image_with_fdb is None:
                pytest.skip("No debian packages available to test")
            container = Container(ubuntu_image_with_fdb)
            container.run(
                ["/etc/init.d/foundationdb", "start"]
            )  # outside docker this shouldn't be necessary
        elif request.param == "centos":
            if centos_image_with_fdb is None:
                pytest.skip("No rpm packages available to test")
            container = Container(centos_image_with_fdb, initd=True)
        elif request.param == "ubuntu-versioned":
            if ubuntu_image_with_fdb is None:
                pytest.skip("No versioned debian packages available to test")
            container = Container(ubuntu_image_with_fdb_versioned)
            container.run(
                ["/etc/init.d/foundationdb", "start"]
            )  # outside docker this shouldn't be necessary
        elif request.param == "centos-versioned":
            if centos_image_with_fdb is None:
                pytest.skip("No versioned rpm packages available to test")
            container = Container(centos_image_with_fdb_versioned, initd=True)
        else:
            assert False
        yield container
    finally:
        if container is not None:
            container.dispose()


#################### BEGIN ACTUAL TESTS ####################


def test_db_available(linux_container: Container):
    linux_container.run(["fdbcli", "--exec", "get x"])


def test_write(linux_container: Container, snapshot):
    linux_container.run(["fdbcli", "--exec", "writemode on; set x y"])
    assert snapshot == linux_container.run(["fdbcli", "--exec", "get x"])


def test_fdbcli_help_text(linux_container: Container, snapshot):
    assert snapshot == linux_container.run(["fdbcli", "--help"])


def test_backup_restore(linux_container: Container, snapshot, tmp_path: pathlib.Path):
    linux_container.run(["fdbcli", "--exec", "writemode on; set x y"])
    assert snapshot == linux_container.run(
        ["fdbbackup", "start", "-d", "file:///tmp/fdb_backup", "-w"]
    )
    linux_container.run(["fdbcli", "--exec", "writemode on; clear x"])
    linux_container.run(
        [
            "bash",
            "-c",
            "fdbrestore start -r file://$(echo /tmp/fdb_backup/*) -w --dest_cluster_file /etc/foundationdb/fdb.cluster",
        ]
    )
    assert snapshot == linux_container.run(["fdbcli", "--exec", "get x"])
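The snapshot tests above all drive the packaged binaries through Container.run. For illustration, one more test in the same style might look like the sketch below; the test name is hypothetical, and it assumes the same linux_container and snapshot fixtures defined above.

    def test_status_minimal(linux_container: Container, snapshot):
        # Hypothetical extra test: like the tests above, it runs once per
        # platform/package combination via the parametrized fixture.
        assert snapshot == linux_container.run(["fdbcli", "--exec", "status minimal"])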
@@ -1,23 +1,24 @@
 add_subdirectory(tutorial)
-if(WIN32)
-  return()
-endif()
-
 # build a virtualenv
 set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
 set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
-set(EXE_SUFFIX "")
 if(WIN32)
+  set(venv_bin_dir ${CMAKE_CURRENT_BINARY_DIR}/venv/Scripts)
+  set(activate_script ${venv_bin_dir}/activate.bat)
   set(EXE_SUFFIX ".exe")
+else()
+  set(venv_bin_dir ${CMAKE_CURRENT_BINARY_DIR}/venv/bin)
+  set(activate_script . ${venv_bin_dir}/activate)
+  set(EXE_SUFFIX "")
 endif()
-set(pip_command ${venv_dir}/bin/pip${EXE_SUFFIX})
-set(python_command ${venv_dir}/bin/python${EXE_SUFFIX})
+set(python_command ${venv_bin_dir}/python${EXE_SUFFIX})
+set(pip_command ${venv_bin_dir}/pip${EXE_SUFFIX})

 add_custom_command(OUTPUT ${venv_dir}/venv_setup
   COMMAND ${Python3_EXECUTABLE} -m venv venv &&
   ${CMAKE_COMMAND} -E copy ${sphinx_dir}/.pip.conf ${venv_dir}/pip.conf &&
-  . ${venv_dir}/bin/activate &&
-  ${pip_command} install --upgrade pip &&
+  ${activate_script} &&
+  ${python_command} -m pip install --upgrade pip &&
   ${pip_command} install --upgrade -r ${sphinx_dir}/requirements.txt &&
   ${pip_command} install sphinx-autobuild && # somehow this is missing in requirements.txt
   ${CMAKE_COMMAND} -E touch ${venv_dir}/venv_setup
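For readers unfamiliar with the rule above, the following is a rough Python equivalent of what venv_setup now does on a POSIX host; the paths and the requirements file location are illustrative, taken from the variables in the rule.

    # Sketch of the virtualenv bootstrap performed by the CMake rule (POSIX layout assumed).
    import subprocess
    import sys

    subprocess.run([sys.executable, "-m", "venv", "venv"], check=True)
    subprocess.run(["venv/bin/python", "-m", "pip", "install", "--upgrade", "pip"], check=True)
    subprocess.run(["venv/bin/pip", "install", "--upgrade", "-r", "sphinx/requirements.txt"], check=True)
    subprocess.run(["venv/bin/pip", "install", "sphinx-autobuild"], check=True)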
@@ -36,9 +37,9 @@ function(add_documentation_target)
   message(ERROR "GENERATOR is a required argument to add_documentation_target")
 endif()
 set(target ${ADT_GENERATOR})
-set(SPHINX_COMMAND "${venv_dir}/bin/sphinx-build")
+set(SPHINX_COMMAND "${venv_bin_dir}/sphinx-build${EXE_SUFFIX}")
 if(ADT_SPHINX_COMMAND)
-  set(SPHINX_COMMAND "${venv_dir}/bin/${ADT_SPHINX_COMMAND}")
+  set(SPHINX_COMMAND "${venv_bin_dir}/${ADT_SPHINX_COMMAND}")
 endif()
 set(doctree "doctree")
 if (ADT_DOCTREE)
@@ -95,9 +95,6 @@ exclude_patterns = []
 # output. They are ignored by default.
 #show_authors = False

-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'solarizedlight'
-
 # A list of ignored prefixes for module index sorting.
 #modindex_common_prefix = []
@@ -1,6 +1,5 @@
 --index-url https://pypi.python.org/simple
-setuptools>=20.10.0
+setuptools>=20.10.0,<=57.4.0
 sphinx==1.5.6
 sphinx-bootstrap-theme==0.4.8
 docutils==0.16
-pygments-style-solarized
@@ -24,7 +24,7 @@ Let's consider an **AP** database. In such a database, reads and writes would al

 However, the downside is stark. Imagine a simple distributed database consisting of two nodes and a network partition making them unable to communicate. To be Available, each of the two nodes must continue to accept writes from clients.

-.. figure:: /images/AP_Partition.png
+.. figure:: images/AP_Partition.png

    Data divergence in an AP system during partition
@@ -62,7 +62,7 @@ Imagine that a rack-top switch fails, and A is partitioned from the network. A w

 However, for all other clients, the database servers can reach a majority of coordination servers, B and C. The replication configuration has ensured there is a full copy of the data available even without A. For these clients, the database will remain available for reads and writes and the web servers will continue to serve traffic.

-.. figure:: /images/FDB_Partition.png
+.. figure:: images/FDB_Partition.png

    Maintenance of availability during partition
@@ -64,7 +64,7 @@ The ``commit`` command commits the current transaction. Any sets or clears execu
 configure
 ---------

-The ``configure`` command changes the database configuration. Its syntax is ``configure [new|tss] [single|double|triple|three_data_hall|three_datacenter] [ssd|memory] [grv_proxies=<N>] [commit_proxies=<N>] [resolvers=<N>] [logs=<N>] [count=<TSS_COUNT>] [perpetual_storage_wiggle=<WIGGLE_SPEED>]``.
+The ``configure`` command changes the database configuration. Its syntax is ``configure [new|tss] [single|double|triple|three_data_hall|three_datacenter] [ssd|memory] [grv_proxies=<N>] [commit_proxies=<N>] [resolvers=<N>] [logs=<N>] [count=<TSS_COUNT>] [perpetual_storage_wiggle=<WIGGLE_SPEED>] [perpetual_storage_wiggle_locality=<<LOCALITY_KEY>:<LOCALITY_VALUE>|0>] [storage_migration_type={disabled|aggressive|gradual}]``.

 The ``new`` option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When ``new`` is used, both a redundancy mode and a storage engine must be specified.
@@ -112,7 +112,24 @@ For recommendations on appropriate values for process types in large clusters, s
 perpetual storage wiggle
 ^^^^^^^^^^^^^^^^^^^^^^^^

-Set the value speed (a.k.a., the number of processes that the Data Distributor should wiggle at a time). Currently, only 0 and 1 are supported. The value 0 means to disable the perpetual storage wiggle. For more details, see :ref:`perpetual-storage-wiggle`.
+``perpetual_storage_wiggle`` sets the wiggle speed, i.e., the number of processes that the Data Distributor should wiggle at a time. Currently, only 0 and 1 are supported. The value 0 disables the perpetual storage wiggle.
+
+``perpetual_storage_wiggle_locality`` sets the process filter for wiggling. Only the processes matching the given locality key and locality value are wiggled. The value 0 disables the locality filter, so all processes match for wiggling.
+
+For more details, see :ref:`perpetual-storage-wiggle`.
+
+storage migration type
+^^^^^^^^^^^^^^^^^^^^^^
+
+``storage_migration_type`` sets how FDB should migrate to a new storage engine if the value is changed. The default is ``disabled``, which means changing the storage engine will not be possible.
+
+* ``disabled``
+* ``gradual``
+* ``aggressive``
+
+``gradual`` replaces a single storage server at a time when the ``perpetual storage wiggle`` is active. This requires the perpetual storage wiggle to be set to a non-zero value to actually migrate storage servers. It is somewhat slow but very safe. This is the recommended method for all production clusters.
+``aggressive`` tries to replace as many storage servers as it can at once, and will recruit a new storage server on the same process as the old one. This is faster, but can hit degraded performance or OOM with two storage servers on the same process. The main benefit over ``gradual`` is that it does not need to take one storage server out of rotation, so it works for small or development clusters that have the same number of storage processes as the replication factor. Note that ``aggressive`` is not exclusive to running the perpetual wiggle.
+``disabled`` means that if the storage engine is changed, fdb will not move the cluster over to the new storage engine. This also prevents the perpetual wiggle from rewriting storage files.

 consistencycheck
 ----------------
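As a concrete illustration of the new options, a gradual storage engine migration could be started as sketched below; this follows the fdbcli-over-subprocess pattern used by the package tests earlier in this change, and it assumes a reachable cluster (the target engine ``ssd-2`` is just an example).

    # Sketch: enable the wiggle plus gradual migration, then change the engine.
    import subprocess

    subprocess.run(
        ["fdbcli", "--exec",
         "configure perpetual_storage_wiggle=1 storage_migration_type=gradual"],
        check=True,
    )
    subprocess.run(["fdbcli", "--exec", "configure ssd-2"], check=True)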
@@ -176,7 +176,7 @@ The *LogPushData* class is used to hold serialized mutations on a per transactio

 *LogPushData.writeTypedMessage* is the function that serializes each mutation and writes it to the correct binary stream to be sent to the corresponding transaction log. Each serialized mutation contains additional metadata about the message, with the format:

-.. image:: /images/serialized_mutation_metadata_format.png
+.. image:: images/serialized_mutation_metadata_format.png

 * Message size: size of the message, in bytes, excluding the four bytes used for the message size
@@ -50,6 +50,9 @@
    "kvstore_available_bytes":12341234,
    "kvstore_free_bytes":12341234,
    "kvstore_total_bytes":12341234,
+   "kvstore_total_size":12341234,
+   "kvstore_total_nodes":12341234,
+   "kvstore_inline_keys":12341234,
    "durable_bytes":{
       "hz":0.0,
       "counter":0,

@@ -208,6 +211,13 @@
       "estimated_cost":{
          "hz":0.0
       }
+   },
+   "busiest_write_tag":{
+      "tag": "",
+      "fractional_cost": 0.0,
+      "estimated_cost":{
+         "hz":0.0
+      }
    }
 }
 ],

@@ -226,6 +236,9 @@
 "$enum":[
    "file_open_error",
    "incorrect_cluster_file_contents",
+   "trace_log_file_write_error",
+   "trace_log_could_not_create_file",
+   "trace_log_writer_thread_unresponsive",
    "process_error",
    "io_error",
    "io_timeout",

@@ -243,7 +256,7 @@
 "excluded":false,
 "address":"1.2.3.4:1234",
 "disk":{
-   "free_bytes":3451233456234, // an estimate of how many bytes are free to allocate to fdbservers without swapping
+   "free_bytes":3451233456234,
    "reads":{
       "hz":0.0,
       "counter":0,

@@ -255,7 +268,7 @@
       "counter":0,
       "sectors":0
    },
-   "total_bytes":123412341234 // an estimate of total physical RAM
+   "total_bytes":123412341234
 },
 "uptime_seconds":1234.2345,
 "cpu":{

@@ -285,15 +298,20 @@
       "run_loop_busy":0.2 // fraction of time the run loop was busy
    }
 },
-"old_logs":[
+"logs":[
    {
-      "logs":[ // this field will be absent if a value has not been explicitly set
+      "log_interfaces":[ // this field will be absent if a value has not been explicitly set
         {
            "id":"7f8d623d0cb9966e",
            "healthy":true,
            "address":"1.2.3.4:1234"
         }
      ],
+     "epoch":1,
+     "current":false,
+     "begin_version":23,
+     "end_version":112315141,
+     "possibly_losing_data":true,
      "log_replication_factor":3,
      "log_write_anti_quorum":0,
      "log_fault_tolerance":2,

@@ -325,7 +343,8 @@
    "storage_server_min_free_space_ratio",
    "log_server_min_free_space",
    "log_server_min_free_space_ratio",
-   "storage_server_durability_lag"
+   "storage_server_durability_lag",
+   "storage_server_list_fetch_failed"
 ]
 },
 "description":"The database is not being saturated by the workload."

@@ -345,7 +364,8 @@
    "storage_server_min_free_space_ratio",
    "log_server_min_free_space",
    "log_server_min_free_space_ratio",
-   "storage_server_durability_lag"
+   "storage_server_durability_lag",
+   "storage_server_list_fetch_failed"
 ]
 },
 "description":"The database is not being saturated by the workload."

@@ -358,15 +378,11 @@
 "auto" : {
    "busy_read" : 0,
    "busy_write" : 0,
-   "count" : 0
+   "count" : 0,
+   "recommended_only":0
 },
 "manual" : {
    "count" : 0
-},
-"recommend" : {
-   "busy_read" : 0,
-   "busy_write" : 0,
-   "count" : 0
 }
 },
 "limiting_queue_bytes_storage_server":0,

@@ -394,12 +410,13 @@
    "seconds":1.0,
    "versions":1000000
 },
+"active_tss_count":0,
 "degraded_processes":0,
 "database_available":true,
 "database_lock_state":{
    "locked":true,
    "lock_uid":"00000000000000000000000000000000" // Only present when database is locked
-}
+},
 "generation":2,
 "latency_probe":{ // all measurements are based on running sample transactions
    "read_seconds":7, // time to perform a single read

@@ -468,6 +485,8 @@
 "database_availability_timeout",
 "consistencycheck_suspendkey_fetch_timeout",
 "consistencycheck_disabled",
+"duplicate_mutation_streams",
+"duplicate_mutation_fetch_timeout",
 "primary_dc_missing",
 "fetch_primary_dc_timeout"
 ]

@@ -476,7 +495,10 @@
 {
    "name":{ // when not limiting
       "$enum":[
-         "incorrect_cluster_file_contents"
+         "incorrect_cluster_file_contents",
+         "trace_log_file_write_error",
+         "trace_log_could_not_create_file",
+         "trace_log_writer_thread_unresponsive"
       ]
    },
    "description":"Cluster file contents do not match current cluster connection string. Verify cluster file is writable and has not been overwritten externally."

@@ -680,7 +702,10 @@
    "ssd-2",
    "ssd-redwood-experimental",
    "ssd-rocksdb-experimental",
-   "memory"
+   "memory",
+   "memory-1",
+   "memory-2",
+   "memory-radixtree-beta"
 ]},
 "tss_count":1,
 "tss_storage_engine":{

@@ -690,7 +715,10 @@
    "ssd-2",
    "ssd-redwood-experimental",
    "ssd-rocksdb-experimental",
-   "memory"
+   "memory",
+   "memory-1",
+   "memory-2",
+   "memory-radixtree-beta"
 ]},
 "coordinators_count":1,
 "excluded_servers":[

@@ -700,12 +728,21 @@
    }
 ],
 "auto_commit_proxies":3,
+"auto_grv_proxies":1,
 "auto_resolvers":1,
 "auto_logs":3,
-"backup_worker_enabled":1,
 "commit_proxies":5, // this field will be absent if a value has not been explicitly set
+"grv_proxies":1, // this field will be absent if a value has not been explicitly set
 "proxies":6, // this field will be absent if a value has not been explicitly set
-"perpetual_storage_wiggle": 0
+"backup_worker_enabled":1,
+"perpetual_storage_wiggle": 0,
+"perpetual_storage_wiggle_locality":"0",
+"storage_migration_type":{
+   "$enum":[
+      "disabled",
+      "gradual",
+      "aggressive"
+   ]}
 },
 "data":{
    "least_operating_space_bytes_log_server":0,

@@ -805,7 +842,8 @@
 "coordinators":[
    {
       "reachable":true,
-      "address":"127.0.0.1:4701"
+      "address":"127.0.0.1:4701",
+      "protocol":"0fdb00b070010001"
    }
 ],
 "quorum_reachable":true
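Several of the fields added above surface in the machine-readable status document, so a quick way to inspect them is the \xff\xff/status/json special key. A minimal sketch with the Python bindings follows; which fields are actually present depends on the server version.

    # Sketch: read cluster status and print the new configuration fields.
    import json
    import fdb

    fdb.api_version(630)
    db = fdb.open()

    status = json.loads(db[b"\xff\xff/status/json"])
    config = status["cluster"]["configuration"]
    print(config.get("storage_migration_type"))
    print(config.get("perpetual_storage_wiggle_locality"))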
@@ -9,7 +9,7 @@ Scaling

 FoundationDB scales linearly with the number of cores in a cluster over a wide range of sizes.

-.. image:: /images/scaling.png
+.. image:: images/scaling.png

 Here, a cluster of commodity hardware scales to **8.2 million** operations/sec doing a 90% read and 10% write workload with 16 byte keys and values between 8 and 100 bytes.

@@ -24,7 +24,7 @@ Latency

 FoundationDB has low latencies over a broad range of workloads that only increase modestly as the cluster approaches saturation.

-.. image:: /images/latency.png
+.. image:: images/latency.png

 When run at less than **75% load**, FoundationDB typically has the following latencies:

@@ -53,7 +53,7 @@ Throughput (per core)

 FoundationDB provides good throughput for the full range of read and write workloads, with two fully durable storage engine options.

-.. image:: /images/throughput.png
+.. image:: images/throughput.png

 FoundationDB offers two :ref:`storage engines <configuration-storage-engine>`, optimized for distinct use cases, both of which write to disk before reporting transactions committed. For each storage engine, the graph shows throughput of a single FoundationDB process running on a **single core** with saturating read/write workloads ranging from 100% reads to 100% writes, all with 16 byte keys and values between 8 and 100 bytes. Throughput for the unmixed workloads is about:

@@ -79,7 +79,7 @@ Concurrency

 FoundationDB is designed to achieve great performance under high concurrency from a large number of clients.

-.. image:: /images/concurrency.png
+.. image:: images/concurrency.png

 Its asynchronous design allows it to handle very high concurrency, and for a typical workload with 90% reads and 10% writes, maximum throughput is reached at about 200 concurrent operations. This number of operations was achieved with **20** concurrent transactions per FoundationDB process each running 10 operations with 16 byte keys and values between 8 and 100 bytes.
@@ -31,6 +31,8 @@ Configuration

 You can configure the Perpetual Storage Wiggle via the FDB :ref:`command line interface <command-line-interface>`.

+Note that to have the Perpetual Storage Wiggle change the storage engine type, you must configure ``storage_migration_type=gradual``.
+
 Example commands
 ----------------

@@ -38,6 +40,10 @@ Open perpetual storage wiggle: ``configure perpetual_storage_wiggle=1``.

 Disable perpetual storage wiggle on the cluster: ``configure perpetual_storage_wiggle=0``.

+Enable perpetual storage wiggle only for processes matching the given locality key and value: ``configure perpetual_storage_wiggle=1 perpetual_storage_wiggle_locality=<LOCALITY_KEY>:<LOCALITY_VALUE>``.
+
+Disable the perpetual storage wiggle locality matching filter, which wiggles all processes: ``configure perpetual_storage_wiggle_locality=0``.
+
 Monitor
 =======
@@ -16,7 +16,7 @@ The processing order of multiple transactions is important because it affects th
 The content is based on FDB 6.2 and is true for FDB 6.3. A new timestamp proxy role is introduced in post FDB 6.3,
 which affects the read path. We will discuss the timestamp proxy role in a future version of this document.

-.. image:: /images/FDB_read_path.png
+.. image:: images/FDB_read_path.png

 Components
 =================

@@ -198,7 +198,7 @@ Write path of a transaction
 Suppose a client has a write-only transaction. Fig. 2 below shows the write path in a non-HA cluster.
 We will discuss how a transaction with both reads and writes works in the next section.

-.. image:: /images/FDB_write_path.png
+.. image:: images/FDB_write_path.png

 To simplify the explanation, the steps below do not include transaction batching on the proxy,
 which is a typical database technique to increase transaction throughput.

@@ -461,7 +461,7 @@ The ordering is enforced in the timestamp generator, the concurrency control com
 We use the following example and draw its swimlane diagram to illustrate how two write transactions are ordered in FDB.
 The diagram with notes can be viewed `here <https://lucid.app/lucidchart/6336dbe3-cff4-4c46-995a-4ca3d9260696/view?page=0_0#?folder_id=home&browser=icon>`_.

-.. image:: /images/FDB_multiple_txn_swimlane_diagram.png
+.. image:: images/FDB_multiple_txn_swimlane_diagram.png

 Reference
 ============
@@ -2,9 +2,31 @@
 Release Notes
 #############

+6.3.21
+======
+* Added a ThreadID field to all trace events for the purpose of multi-threaded client debugging. `(PR #5665) <https://github.com/apple/foundationdb/pull/5665>`_
+* Fixed some histograms' group names in the master proxy. `(PR #5674) <https://github.com/apple/foundationdb/pull/5674>`_
+* Added histograms for GRV path components in the proxy. `(PR #5689) <https://github.com/apple/foundationdb/pull/5689>`_
+* Fixed a race condition introduced in 6.3.20 between setting timeouts and resetting or destroying transactions. `(PR #5695) <https://github.com/apple/foundationdb/pull/5695>`_
+* Disabled detailed transaction log pop tracing by default. `(PR #5696) <https://github.com/apple/foundationdb/pull/5696>`_
+
+6.3.20
+======
+* Several minor problems with the versioned packages have been fixed. `(PR #5607) <https://github.com/apple/foundationdb/pull/5607>`_
+* A client might not honor transaction timeouts when using the multi-version client if it cannot connect to the cluster. `(Issue #5595) <https://github.com/apple/foundationdb/issues/5595>`_
+* Fixed a very rare bug where recovery could potentially roll back a committed transaction. `(PR #5461) <https://github.com/apple/foundationdb/pull/5461>`_
+* Added histograms for commit path components in the proxy. `(PR #5367) <https://github.com/apple/foundationdb/pull/5367>`_
+* Fixed a spurious checkRegions call that could cause an unwanted primary DC failover. `(PR #5330) <https://github.com/apple/foundationdb/pull/5330>`_
+
 6.3.19
 ======
-* Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5330) <https://github.com/apple/foundationdb/pull/5330>`_
+* Added the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5330) <https://github.com/apple/foundationdb/pull/5330>`_
+* Added error details to the ``RemovedDeadBackupLayerStatus`` trace event. `(PR #5356) <https://github.com/apple/foundationdb/pull/5356>`_
+* Added RepeatableReadMultiThreadClientTest. `(PR #5212) <https://github.com/apple/foundationdb/pull/5212>`_
+* Added a new feature that allows FDB to detect grey failures and automatically recover from them. `(PR #5249) <https://github.com/apple/foundationdb/pull/5249>`_
+* Added version and timestamp to the ``TimeKeeperCommit`` trace event. `(PR #5415) <https://github.com/apple/foundationdb/pull/5415>`_
+* Added the ``RecruitFromConfigurationRetry`` trace event to improve recruitment observability. `(PR #5455) <https://github.com/apple/foundationdb/pull/5455>`_
+* Several fixes to pkg_tester and packaging. `(PR #5460) <https://github.com/apple/foundationdb/pull/5460>`_

 6.3.18
 ======
@@ -64,6 +64,7 @@ Fixes
 * If a restore is done using a prefix to remove and specific key ranges to restore, the key range boundaries must begin with the prefix to remove. `(PR #4684) <https://github.com/apple/foundationdb/pull/4684>`_
 * The multi-version client API would not propagate errors that occurred when creating databases on external clients. This could result in invalid memory accesses. `(PR #5220) <https://github.com/apple/foundationdb/pull/5220>`_
 * Fixed a race between the multi-version client connecting to a cluster and destroying the database that could cause an assertion failure. `(PR #5220) <https://github.com/apple/foundationdb/pull/5220>`_
+* A client might not honor transaction timeouts when using the multi-version client if it cannot connect to the cluster. `(Issue #5595) <https://github.com/apple/foundationdb/issues/5595>`_

 Status
 ------
|
||||||
Trace ID 1 uint64 The 64-bit identifier of the trace. All spans in a trace share the same trace ID.
|
Trace ID 1 uint64 The 64-bit identifier of the trace. All spans in a trace share the same trace ID.
|
||||||
Span ID 2 uint64 The 64-bit identifier of the span. All spans have a unique identifier.
|
Span ID 2 uint64 The 64-bit identifier of the span. All spans have a unique identifier.
|
||||||
Start timestamp 3 double The timestamp when the operation represented by the span began.
|
Start timestamp 3 double The timestamp when the operation represented by the span began.
|
||||||
End timestamp 4 double The timestamp when the operation represented by the span ended.
|
Duration 4 double The duration in seconds of the operation represented by the span.
|
||||||
Operation name 5 string The name of the operation the span represents.
|
Operation name 5 string The name of the operation the span represents.
|
||||||
Tags 6 map User defined tags, added manually to specify additional information.
|
Tags 6 map User defined tags, added manually to specify additional information.
|
||||||
Parent span IDs 7 vector (Optional) A list of span IDs representing parents of this span.
|
Parent span IDs 7 vector (Optional) A list of span IDs representing parents of this span.
|
||||||
|
|
|
@@ -199,7 +199,7 @@ that process, and wait for necessary data to be moved away.
 While the key is set, any commit that tries to set a key in the range will fail with the ``special_keys_api_failure`` error.
 #. ``\xff\xff/management/data_distribution/<mode|rebalance_ignored>`` Read/write. Changing these two keys will change the two corresponding system keys ``\xff/dataDistributionMode`` and ``\xff\x02/rebalanceDDIgnored``. The value of ``\xff\xff/management/data_distribution/mode`` is a literal text of ``0`` (disable) or ``1`` (enable). Transactions committed with invalid values will throw ``special_keys_api_failure``. The value of ``\xff\xff/management/data_distribution/rebalance_ignored`` is empty. If present, it means data distribution is disabled for rebalance. Any transaction committed with a non-empty value for this key will throw ``special_keys_api_failure``. For more details, see the help text of the ``fdbcli`` command ``datadistribution``.
 #. ``\xff\xff/management/consistency_check_suspended`` Read/write. Setting or reading this key will set or read the underlying system key ``\xff\x02/ConsistencyCheck/Suspend``. The value of this special key is unused; thus, if present, it will be empty. In particular, if the key exists, then consistency checking is suspended. For more details, see the help text of the ``fdbcli`` command ``consistencycheck``.
-#. ``\xff\xff/management/db_locked`` Read/write. A single key that can be read and modified. Setting the key will lock the database and clearing the key will unlock it. If the database is already locked, then the commit will fail with the ``special_keys_api_failure`` error. For more details, see the help text of the ``fdbcli`` commands ``lock`` and ``unlock``.
+#. ``\xff\xff/management/db_locked`` Read/write. A single key that can be read and modified. Setting the key with a 32-byte hex string UID will lock the database and clearing the key will unlock it. Reading the key will return the UID string as the value. If the database is already locked, then the commit will fail with the ``special_keys_api_failure`` error. For more details, see the help text of the ``fdbcli`` commands ``lock`` and ``unlock``.
 #. ``\xff\xff/management/auto_coordinators`` Read-only. A single key which, if read, will return a set of processes able to satisfy the current redundancy level and serve as new coordinators. The return value is formatted as a comma-delimited string of network addresses of coordinators, i.e. ``<ip:port>,<ip:port>,...,<ip:port>``.
 #. ``\xff\xff/management/excluded_locality/<locality>`` Read/write. Indicates that the cluster should move data away from processes matching ``<locality>``, so that they can be safely removed. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
 #. ``\xff\xff/management/failed_locality/<locality>`` Read/write. Indicates that the cluster should consider matching processes as permanently failed. This allows the cluster to avoid maintaining extra state and doing extra work in the hope that these processes come back. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
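The lock behavior documented above can be exercised from the Python bindings as sketched below; the UID is arbitrary, and the sketch assumes API version 6.3 and that writes to the special key space are enabled on the transaction.

    # Sketch: lock the database through the special key space.
    import fdb

    fdb.api_version(630)
    db = fdb.open()

    tr = db.create_transaction()
    tr.options.set_special_key_space_enable_writes()
    tr[b"\xff\xff/management/db_locked"] = b"0123456789abcdef0123456789abcdef"  # 32-byte hex UID
    tr.commit().wait()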
@@ -33,6 +33,12 @@

 NetworkAddress serverAddress;

+enum TutorialWellKnownEndpoints {
+    WLTOKEN_SIMPLE_KV_SERVER = WLTOKEN_FIRST_AVAILABLE,
+    WLTOKEN_ECHO_SERVER,
+    WLTOKEN_COUNT_IN_TUTORIAL
+};
+
 // this is a simple actor that will report how long
 // it is already running once a second.
 ACTOR Future<Void> simpleTimer() {

@@ -153,7 +159,7 @@ struct StreamReply : ReplyPromiseStreamReply {

     template <class Ar>
     void serialize(Ar& ar) {
-        serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, index);
+        serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, ReplyPromiseStreamReply::sequence, index);
     }
 };

@@ -171,7 +177,7 @@ uint64_t tokenCounter = 1;

 ACTOR Future<Void> echoServer() {
     state EchoServerInterface echoServer;
-    echoServer.getInterface.makeWellKnownEndpoint(UID(-1, ++tokenCounter), TaskPriority::DefaultEndpoint);
+    echoServer.getInterface.makeWellKnownEndpoint(WLTOKEN_ECHO_SERVER, TaskPriority::DefaultEndpoint);
     loop {
         try {
             choose {

@@ -204,7 +210,8 @@ ACTOR Future<Void> echoServer() {

 ACTOR Future<Void> echoClient() {
     state EchoServerInterface server;
-    server.getInterface = RequestStream<GetInterfaceRequest>(Endpoint({ serverAddress }, UID(-1, ++tokenCounter)));
+    server.getInterface =
+        RequestStream<GetInterfaceRequest>(Endpoint::wellKnown({ serverAddress }, WLTOKEN_ECHO_SERVER));
     EchoServerInterface s = wait(server.getInterface.getReply(GetInterfaceRequest()));
     server = s;
     EchoRequest echoRequest;

@@ -291,7 +298,7 @@ struct ClearRequest {
 ACTOR Future<Void> kvStoreServer() {
     state SimpleKeyValueStoreInteface inf;
     state std::map<std::string, std::string> store;
-    inf.connect.makeWellKnownEndpoint(UID(-1, ++tokenCounter), TaskPriority::DefaultEndpoint);
+    inf.connect.makeWellKnownEndpoint(WLTOKEN_SIMPLE_KV_SERVER, TaskPriority::DefaultEndpoint);
     loop {
         choose {
             when(GetKVInterface req = waitNext(inf.connect.getFuture())) {

@@ -328,7 +335,7 @@ ACTOR Future<Void> kvStoreServer() {
 ACTOR Future<SimpleKeyValueStoreInteface> connect() {
     std::cout << format("%llu: Connect...\n", uint64_t(g_network->now()));
     SimpleKeyValueStoreInteface c;
-    c.connect = RequestStream<GetKVInterface>(Endpoint({ serverAddress }, UID(-1, ++tokenCounter)));
+    c.connect = RequestStream<GetKVInterface>(Endpoint::wellKnown({ serverAddress }, WLTOKEN_SIMPLE_KV_SERVER));
     SimpleKeyValueStoreInteface result = wait(c.connect.getReply(GetKVInterface()));
     std::cout << format("%llu: done..\n", uint64_t(g_network->now()));
     return result;

@@ -562,7 +569,7 @@ int main(int argc, char* argv[]) {
     }
     platformInit();
     g_network = newNet2(TLSConfig(), false, true);
-    FlowTransport::createInstance(!isServer, 0);
+    FlowTransport::createInstance(!isServer, 0, WLTOKEN_COUNT_IN_TUTORIAL);
     NetworkAddress publicAddress = NetworkAddress::parse("0.0.0.0:0");
     if (isServer) {
         publicAddress = NetworkAddress::parse("0.0.0.0:" + port);
@@ -81,8 +81,7 @@ bool BackupTLSConfig::setupTLS() {
     try {
         setNetworkOption(FDBNetworkOptions::TLS_VERIFY_PEERS, tlsVerifyPeers);
     } catch (Error& e) {
-        std::cerr << "ERROR: cannot set TLS peer verification to " << tlsVerifyPeers << " (" << e.what()
-                  << ")\n";
+        std::cerr << "ERROR: cannot set TLS peer verification to " << tlsVerifyPeers << " (" << e.what() << ")\n";
         return false;
     }
 }
@@ -67,11 +67,11 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
     TLS_OPTION_FLAGS
 #endif
     { OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
     { OPT_LIST_ONLY, "--list_only", SO_NONE },
     { OPT_KEY_PREFIX, "-k", SO_REQ_SEP },
     { OPT_HEX_KEY_PREFIX, "--hex_prefix", SO_REQ_SEP },
     { OPT_BEGIN_VERSION_FILTER, "--begin_version_filter", SO_REQ_SEP },
     { OPT_END_VERSION_FILTER, "--end_version_filter", SO_REQ_SEP },
     { OPT_HELP, "-?", SO_NONE },
     { OPT_HELP, "-h", SO_NONE },
     { OPT_HELP, "--help", SO_NONE },
@@ -46,40 +46,39 @@ extern bool g_crashOnError;
 namespace file_converter {

 void printDecodeUsage() {
-    std::cout
-        << "Decoder for FoundationDB backup mutation logs.\n"
+    std::cout << "Decoder for FoundationDB backup mutation logs.\n"
                  "Usage: fdbdecode [OPTIONS]\n"
                  "  -r, --container URL\n"
                  "                 Backup container URL, e.g., file:///some/path/.\n"
                  "  -i, --input FILE\n"
                  "                 Log file filter, only matched files are decoded.\n"
                  "  --log          Enables trace file logging for the CLI session.\n"
                  "  --logdir PATH  Specifies the output directory for trace files. If\n"
                  "                 unspecified, defaults to the current directory. Has\n"
                  "                 no effect unless --log is specified.\n"
                  "  --loggroup LOG_GROUP\n"
                  "                 Sets the LogGroup field with the specified value for all\n"
                  "                 events in the trace output (defaults to `default').\n"
                  "  --trace_format FORMAT\n"
                  "                 Select the format of the trace files, xml (the default) or json.\n"
                  "                 Has no effect unless --log is specified.\n"
                  "  --crash        Crash on serious error.\n"
                  "  --blob_credentials FILE\n"
                  "                 File containing blob credentials in JSON format.\n"
                  "                 The same credential format/file fdbbackup uses.\n"
 #ifndef TLS_DISABLED
                  TLS_HELP
 #endif
                  "  --build_flags  Print build information and exit.\n"
                  "  --list_only    Print file list and exit.\n"
                  "  -k KEY_PREFIX  Use the prefix for filtering mutations\n"
                  "  --hex_prefix HEX_PREFIX\n"
                  "                 The prefix specified in HEX format, e.g., \\x05\\x01.\n"
                  "  --begin_version_filter BEGIN_VERSION\n"
                  "                 The version range's begin version (inclusive) for filtering.\n"
                  "  --end_version_filter END_VERSION\n"
                  "                 The version range's end version (exclusive) for filtering.\n"
                  "\n";
     return;
 }
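For reference, the options in this help text compose as in the following sketch; the container URL is illustrative.

    # Sketch: list the matching mutation log files without decoding them.
    import subprocess

    subprocess.run(["fdbdecode", "-r", "file:///some/path/", "--list_only"], check=True)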
@@ -465,9 +464,9 @@ ACTOR Future<Void> process_file(Reference<IBackupContainer> container, LogFile f
         }
         if (print) {
             TraceEvent(format("Mutation_%llu_%d", vms.version, sub).c_str(), uid)
                 .detail("Version", vms.version)
                 .setMaxFieldLength(10000)
                 .detail("M", m.toString());
             std::cout << vms.version << " " << m.toString() << "\n";
         }
     }

@@ -498,7 +497,8 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
     state std::vector<LogFile> logs = getRelevantLogFiles(listing.logs, params);
     printLogFiles("Relevant files are: ", logs);

-    if (params.list_only) return Void();
+    if (params.list_only)
+        return Void();

     state int idx = 0;
     while (idx < logs.size()) {
@@ -52,8 +52,6 @@
 #include <string>
 #include <iostream>
 #include <ctime>
-using std::cout;
-using std::endl;

 #ifdef _WIN32
 #define WIN32_LEAN_AND_MEAN
@@ -1209,9 +1207,11 @@ static void printFastRestoreUsage(bool devhelp) {
 static void printDBAgentUsage(bool devhelp) {
     printf("FoundationDB " FDB_VT_PACKAGE_NAME " (v" FDB_VT_VERSION ")\n");
     printf("Usage: %s [OPTIONS]\n\n", exeDatabaseAgent.toString().c_str());
-    printf("  -d CONNFILE    The path of a file containing the connection string for the\n"
+    printf("  -d, --destination CONNFILE\n"
+           "                 The path of a file containing the connection string for the\n"
            "                 destination FoundationDB cluster.\n");
-    printf("  -s CONNFILE    The path of a file containing the connection string for the\n"
+    printf("  -s, --source CONNFILE\n"
+           "                 The path of a file containing the connection string for the\n"
            "                 source FoundationDB cluster.\n");
     printf("  --log          Enables trace file logging for the CLI session.\n"
            "  --logdir PATH  Specifies the output directory for trace files. If\n"

@@ -1223,7 +1223,7 @@ static void printDBAgentUsage(bool devhelp) {
     printf("  --trace_format FORMAT\n"
            "                 Select the format of the trace files. xml (the default) and json are supported.\n"
            "                 Has no effect unless --log is specified.\n");
-    printf("  -m SIZE, --memory SIZE\n"
+    printf("  -m, --memory SIZE\n"
            "                 Memory limit. The default value is 8GiB. When specified\n"
            "                 without a unit, MiB is assumed.\n");
 #ifndef TLS_DISABLED
|
||||||
extern uint8_t* g_extra_memory;
|
extern uint8_t* g_extra_memory;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
// Creates a connection to a cluster. Optionally prints an error if the connection fails.
|
||||||
|
Optional<Database> connectToCluster(std::string const& clusterFile,
|
||||||
|
LocalityData const& localities,
|
||||||
|
bool quiet = false) {
|
||||||
|
auto resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(clusterFile);
|
||||||
|
Reference<ClusterConnectionFile> ccf;
|
||||||
|
|
||||||
|
Optional<Database> db;
|
||||||
|
|
||||||
|
try {
|
||||||
|
ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
|
||||||
|
} catch (Error& e) {
|
||||||
|
if (!quiet)
|
||||||
|
fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
|
||||||
|
return db;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
db = Database::createDatabase(ccf, -1, IsInternal::True, localities);
|
||||||
|
} catch (Error& e) {
|
||||||
|
if (!quiet) {
|
||||||
|
fprintf(stderr, "ERROR: %s\n", e.what());
|
||||||
|
fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
|
||||||
|
}
|
||||||
|
return db;
|
||||||
|
}
|
||||||
|
|
||||||
|
return db;
|
||||||
|
};
|
||||||
|
|
||||||
int main(int argc, char* argv[]) {
|
int main(int argc, char* argv[]) {
|
||||||
platformInit();
|
platformInit();
|
||||||
|
|
||||||
|
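The new helper above centralizes what used to be two copies of the connection logic in main(). As a rough usage sketch (not part of the diff), assuming clusterFile and localities are in scope as they are in main(), a caller would do:

    // Minimal sketch: quiet=true suppresses error output, useful when retrying.
    Optional<Database> maybeDb = connectToCluster(clusterFile, localities, /*quiet=*/true);
    if (maybeDb.present()) {
        Database db = maybeDb.get();
        // ... issue work against db ...
    }

Returning an empty Optional instead of throwing lets the call sites below turn connection failure into a simple boolean.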
@@ -3785,9 +3815,7 @@ int main(int argc, char* argv[]) {
 	std::set_new_handler(&platform::outOfMemory);
 	setMemoryQuota(memLimit);

-	Reference<ClusterConnectionFile> ccf;
 	Database db;
-	Reference<ClusterConnectionFile> sourceCcf;
 	Database sourceDb;
 	FileBackupAgent ba;
 	Key tag;
@@ -3830,43 +3858,29 @@ int main(int argc, char* argv[]) {
 	};

 	auto initCluster = [&](bool quiet = false) {
-		auto resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(clusterFile);
-		try {
-			ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
-		} catch (Error& e) {
-			if (!quiet)
-				fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
-			return false;
-		}
-
-		try {
-			db = Database::createDatabase(ccf, -1, IsInternal::True, localities);
-		} catch (Error& e) {
-			fprintf(stderr, "ERROR: %s\n", e.what());
-			fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
-			return false;
-		}
-
-		return true;
+		Optional<Database> result = connectToCluster(clusterFile, localities, quiet);
+		if (result.present()) {
+			db = result.get();
+		}
+
+		return result.present();
 	};

-	if (sourceClusterFile.size()) {
-		auto resolvedSourceClusterFile = ClusterConnectionFile::lookupClusterFileName(sourceClusterFile);
-		try {
-			sourceCcf = makeReference<ClusterConnectionFile>(resolvedSourceClusterFile.first);
-		} catch (Error& e) {
-			fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedSourceClusterFile, e).c_str());
-			return FDB_EXIT_ERROR;
-		}
-
-		try {
-			sourceDb = Database::createDatabase(sourceCcf, -1, IsInternal::True, localities);
-		} catch (Error& e) {
-			fprintf(stderr, "ERROR: %s\n", e.what());
-			fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", sourceCcf->getFilename().c_str());
-			return FDB_EXIT_ERROR;
-		}
-	}
+	auto initSourceCluster = [&](bool required, bool quiet = false) {
+		if (!sourceClusterFile.size() && required) {
+			if (!quiet) {
+				fprintf(stderr, "ERROR: source cluster file is required\n");
+			}
+			return false;
+		}
+
+		Optional<Database> result = connectToCluster(sourceClusterFile, localities, quiet);
+		if (result.present()) {
+			sourceDb = result.get();
+		}
+
+		return result.present();
+	};

 	switch (programExe) {
 	case ProgramExe::AGENT:
@@ -4166,13 +4180,15 @@ int main(int argc, char* argv[]) {
 		}
 		break;
 	case ProgramExe::DR_AGENT:
-		if (!initCluster())
+		if (!initCluster() || !initSourceCluster(true)) {
 			return FDB_EXIT_ERROR;
+		}
 		f = stopAfter(runDBAgent(sourceDb, db));
 		break;
 	case ProgramExe::DB_BACKUP:
-		if (!initCluster())
+		if (!initCluster() || !initSourceCluster(dbType != DBType::ABORT || !dstOnly)) {
 			return FDB_EXIT_ERROR;
+		}
 		switch (dbType) {
 		case DBType::START:
 			f = stopAfter(submitDBBackup(sourceDb, db, backupKeys, tagName));
@@ -4217,14 +4233,14 @@ int main(int argc, char* argv[]) {

 #ifdef ALLOC_INSTRUMENTATION
 	{
-		cout << "Page Counts: " << FastAllocator<16>::pageCount << " " << FastAllocator<32>::pageCount << " "
+		std::cout << "Page Counts: " << FastAllocator<16>::pageCount << " " << FastAllocator<32>::pageCount << " "
 		          << FastAllocator<64>::pageCount << " " << FastAllocator<128>::pageCount << " "
 		          << FastAllocator<256>::pageCount << " " << FastAllocator<512>::pageCount << " "
 		          << FastAllocator<1024>::pageCount << " " << FastAllocator<2048>::pageCount << " "
 		          << FastAllocator<4096>::pageCount << " " << FastAllocator<8192>::pageCount << " "
-		          << FastAllocator<16384>::pageCount << endl;
+		          << FastAllocator<16384>::pageCount << std::endl;

-		vector<std::pair<std::string, const char*>> typeNames;
+		std::vector<std::pair<std::string, const char*>> typeNames;
 		for (auto i = allocInstr.begin(); i != allocInstr.end(); ++i) {
 			std::string s;

@@ -2,15 +2,30 @@ set(FDBCLI_SRCS
 	fdbcli.actor.cpp
 	fdbcli.actor.h
 	AdvanceVersionCommand.actor.cpp
+	CacheRangeCommand.actor.cpp
+	ConfigureCommand.actor.cpp
 	ConsistencyCheckCommand.actor.cpp
+	CoordinatorsCommand.actor.cpp
+	DataDistributionCommand.actor.cpp
+	ExcludeCommand.actor.cpp
+	ExpensiveDataCheckCommand.actor.cpp
+	FileConfigureCommand.actor.cpp
 	FlowLineNoise.actor.cpp
 	FlowLineNoise.h
 	ForceRecoveryWithDataLossCommand.actor.cpp
+	IncludeCommand.actor.cpp
+	KillCommand.actor.cpp
+	LockCommand.actor.cpp
 	MaintenanceCommand.actor.cpp
+	ProfileCommand.actor.cpp
 	SetClassCommand.actor.cpp
 	SnapshotCommand.actor.cpp
+	StatusCommand.actor.cpp
+	SuspendCommand.actor.cpp
 	ThrottleCommand.actor.cpp
-	Util.cpp
+	TriggerDDTeamInfoLogCommand.actor.cpp
+	TssqCommand.actor.cpp
+	Util.actor.cpp
 	linenoise/linenoise.h)

 if(NOT WIN32)
@@ -0,0 +1,61 @@
+/*
+ * CacheRangeCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/ManagementAPI.actor.h"
+#include "fdbclient/SystemData.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace fdb_cli {
+
+ACTOR Future<bool> cacheRangeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
+	if (tokens.size() != 4) {
+		printUsage(tokens[0]);
+		return false;
+	} else {
+		state KeyRangeRef cacheRange(tokens[2], tokens[3]);
+		if (tokencmp(tokens[1], "set")) {
+			wait(ManagementAPI::addCachedRange(db, cacheRange));
+		} else if (tokencmp(tokens[1], "clear")) {
+			wait(ManagementAPI::removeCachedRange(db, cacheRange));
+		} else {
+			printUsage(tokens[0]);
+			return false;
+		}
+	}
+	return true;
+}
+
+CommandFactory cacheRangeFactory(
+    "cache_range",
+    CommandHelp(
+        "cache_range <set|clear> <BEGINKEY> <ENDKEY>",
+        "Mark a key range to add to or remove from storage caches.",
+        "Use the storage caches to assist in balancing hot read shards. Set the appropriate ranges when experiencing "
+        "heavy load, and clear them when they are no longer necessary."));
+
+} // namespace fdb_cli
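The new file shows the shape every command module in this change set follows: an actor that validates tokens, acts through the ManagementAPI, and returns a bool, plus a CommandFactory that registers the help text. A rough invocation sketch (not part of the diff), assuming it runs inside another flow ACTOR and using illustrative key tokens:

    // Minimal sketch: tokens mirror what the fdbcli parser would produce for
    // `cache_range set a b`; db is a Reference<IDatabase>.
    std::vector<StringRef> tokens = { LiteralStringRef("cache_range"),
                                      LiteralStringRef("set"),
                                      LiteralStringRef("a"),
                                      LiteralStringRef("b") };
    bool ok = wait(fdb_cli::cacheRangeCommandActor(db, tokens));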
@@ -0,0 +1,300 @@
+/*
+ * ConfigureCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbcli/FlowLineNoise.h"
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/ManagementAPI.actor.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace fdb_cli {
+
+ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
+                                         Database localDb,
+                                         std::vector<StringRef> tokens,
+                                         LineNoise* linenoise,
+                                         Future<Void> warn) {
+	state ConfigurationResult result;
+	state StatusObject s;
+	state int startToken = 1;
+	state bool force = false;
+	if (tokens.size() < 2)
+		result = ConfigurationResult::NO_OPTIONS_PROVIDED;
+	else {
+		if (tokens[startToken] == LiteralStringRef("FORCE")) {
+			force = true;
+			startToken = 2;
+		}
+
+		state Optional<ConfigureAutoResult> conf;
+		if (tokens[startToken] == LiteralStringRef("auto")) {
+			// get cluster status
+			state Reference<ITransaction> tr = db->createTransaction();
+			if (!tr->isValid()) {
+				StatusObject _s = wait(StatusClient::statusFetcher(localDb));
+				s = _s;
+			} else {
+				state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
+				Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
+				if (!statusValue.present()) {
+					fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");
+					return false;
+				}
+				json_spirit::mValue mv;
+				json_spirit::read_string(statusValue.get().toString(), mv);
+				s = StatusObject(mv.get_obj());
+			}
+
+			if (warn.isValid())
+				warn.cancel();
+
+			conf = parseConfig(s);
+
+			if (!conf.get().isValid()) {
+				printf("Unable to provide advice for the current configuration.\n");
+				return false;
+			}
+
+			bool noChanges = conf.get().old_replication == conf.get().auto_replication &&
+			                 conf.get().old_logs == conf.get().auto_logs &&
+			                 conf.get().old_commit_proxies == conf.get().auto_commit_proxies &&
+			                 conf.get().old_grv_proxies == conf.get().auto_grv_proxies &&
+			                 conf.get().old_resolvers == conf.get().auto_resolvers &&
+			                 conf.get().old_processes_with_transaction == conf.get().auto_processes_with_transaction &&
+			                 conf.get().old_machines_with_transaction == conf.get().auto_machines_with_transaction;
+
+			bool noDesiredChanges = noChanges && conf.get().old_logs == conf.get().desired_logs &&
+			                        conf.get().old_commit_proxies == conf.get().desired_commit_proxies &&
+			                        conf.get().old_grv_proxies == conf.get().desired_grv_proxies &&
+			                        conf.get().old_resolvers == conf.get().desired_resolvers;
+
+			std::string outputString;
+
+			outputString += "\nYour cluster has:\n\n";
+			outputString += format("  processes %d\n", conf.get().processes);
+			outputString += format("  machines  %d\n", conf.get().machines);
+
+			if (noDesiredChanges)
+				outputString += "\nConfigure recommends keeping your current configuration:\n\n";
+			else if (noChanges)
+				outputString +=
+				    "\nConfigure cannot modify the configuration because some parameters have been set manually:\n\n";
+			else
+				outputString += "\nConfigure recommends the following changes:\n\n";
+			outputString += " ------------------------------------------------------------------- \n";
+			outputString += "| parameter                   | old              | new              |\n";
+			outputString += " ------------------------------------------------------------------- \n";
+			outputString += format("| replication                 | %16s | %16s |\n",
+			                       conf.get().old_replication.c_str(),
+			                       conf.get().auto_replication.c_str());
+			outputString +=
+			    format("| logs                        | %16d | %16d |", conf.get().old_logs, conf.get().auto_logs);
+			outputString += conf.get().auto_logs != conf.get().desired_logs
+			                    ? format(" (manually set; would be %d)\n", conf.get().desired_logs)
+			                    : "\n";
+			outputString += format("| commit_proxies              | %16d | %16d |",
+			                       conf.get().old_commit_proxies,
+			                       conf.get().auto_commit_proxies);
+			outputString += conf.get().auto_commit_proxies != conf.get().desired_commit_proxies
+			                    ? format(" (manually set; would be %d)\n", conf.get().desired_commit_proxies)
+			                    : "\n";
+			outputString += format("| grv_proxies                 | %16d | %16d |",
+			                       conf.get().old_grv_proxies,
+			                       conf.get().auto_grv_proxies);
+			outputString += conf.get().auto_grv_proxies != conf.get().desired_grv_proxies
+			                    ? format(" (manually set; would be %d)\n", conf.get().desired_grv_proxies)
+			                    : "\n";
+			outputString += format(
+			    "| resolvers                   | %16d | %16d |", conf.get().old_resolvers, conf.get().auto_resolvers);
+			outputString += conf.get().auto_resolvers != conf.get().desired_resolvers
+			                    ? format(" (manually set; would be %d)\n", conf.get().desired_resolvers)
+			                    : "\n";
+			outputString += format("| transaction-class processes | %16d | %16d |\n",
+			                       conf.get().old_processes_with_transaction,
+			                       conf.get().auto_processes_with_transaction);
+			outputString += format("| transaction-class machines  | %16d | %16d |\n",
+			                       conf.get().old_machines_with_transaction,
+			                       conf.get().auto_machines_with_transaction);
+			outputString += " ------------------------------------------------------------------- \n\n";
+
+			std::printf("%s", outputString.c_str());
+
+			if (noChanges)
+				return true;
+
+			// TODO: disable completion
+			Optional<std::string> line = wait(linenoise->read("Would you like to make these changes? [y/n]> "));
+
+			if (!line.present() || (line.get() != "y" && line.get() != "Y")) {
+				return true;
+			}
+		}
+
+		ConfigurationResult r = wait(ManagementAPI::changeConfig(
+		    db, std::vector<StringRef>(tokens.begin() + startToken, tokens.end()), conf, force));
+		result = r;
+	}
+
+	// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
+	// there are various results specific to changeConfig() that we need to report:
+	bool ret = true;
+	switch (result) {
+	case ConfigurationResult::NO_OPTIONS_PROVIDED:
+	case ConfigurationResult::CONFLICTING_OPTIONS:
+	case ConfigurationResult::UNKNOWN_OPTION:
+	case ConfigurationResult::INCOMPLETE_CONFIGURATION:
+		printUsage(LiteralStringRef("configure"));
+		ret = false;
+		break;
+	case ConfigurationResult::INVALID_CONFIGURATION:
+		fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
+		ret = false;
+		break;
+	case ConfigurationResult::STORAGE_MIGRATION_DISABLED:
+		fprintf(stderr,
+		        "ERROR: Storage engine type cannot be changed because "
+		        "storage_migration_mode=disabled.\n");
+		fprintf(stderr,
+		        "Type `configure perpetual_storage_wiggle=1 storage_migration_type=gradual' to enable gradual "
+		        "migration with the perpetual wiggle, or `configure "
+		        "storage_migration_type=aggressive' for aggressive migration.\n");
+		ret = false;
+		break;
+	case ConfigurationResult::DATABASE_ALREADY_CREATED:
+		fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
+		ret = false;
+		break;
+	case ConfigurationResult::DATABASE_CREATED:
+		printf("Database created\n");
+		break;
+	case ConfigurationResult::DATABASE_UNAVAILABLE:
+		fprintf(stderr, "ERROR: The database is unavailable\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
+		fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
+		fprintf(stderr,
+		        "ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated "
+		        "before changing the configuration\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
+		fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::REGIONS_CHANGED:
+		fprintf(stderr,
+		        "ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::NOT_ENOUGH_WORKERS:
+		fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::REGION_REPLICATION_MISMATCH:
+		fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::DCID_MISSING:
+		fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
+		fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+		ret = false;
+		break;
+	case ConfigurationResult::SUCCESS:
+		printf("Configuration changed\n");
+		break;
+	case ConfigurationResult::LOCKED_NOT_NEW:
+		fprintf(stderr, "ERROR: `only new databases can be configured as locked`\n");
+		ret = false;
+		break;
+	case ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL:
+		printf("Configuration changed, with warnings\n");
+		fprintf(stderr,
+		        "WARN: To make progress toward the desired storage type with storage_migration_type=gradual, the "
+		        "Perpetual Wiggle must be enabled.\n");
+		fprintf(stderr,
+		        "Type `configure perpetual_storage_wiggle=1' to enable the perpetual wiggle, or `configure "
+		        "storage_migration_type=gradual' to set the gradual migration type.\n");
+		ret = false;
+		break;
+	default:
+		ASSERT(false);
+		ret = false;
+	};
+	return ret;
+}
+
+CommandFactory configureFactory(
+    "configure",
+    CommandHelp(
+        "configure [new|tss]"
+        "<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|"
+        "commit_proxies=<COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*|"
+        "count=<TSS_COUNT>|perpetual_storage_wiggle=<WIGGLE_SPEED>|perpetual_storage_wiggle_locality="
+        "<<LOCALITY_KEY>:<LOCALITY_VALUE>|0>|storage_migration_type={disabled|gradual|aggressive}",
+        "change the database configuration",
+        "The `new' option, if present, initializes a new database with the given configuration rather than changing "
+        "the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "
+        "specified.\n\ntss: when enabled, configures the testing storage server for the cluster instead."
+        "When used with new to set up tss for the first time, it requires both a count and a storage engine."
+        "To disable the testing storage server, run \"configure tss count=0\"\n\n"
+        "Redundancy mode:\n  single - one copy of the data.  Not fault tolerant.\n  double - two copies "
+        "of data (survive one failure).\n  triple - three copies of data (survive two failures).\n  three_data_hall - "
+        "See the Admin Guide.\n  three_datacenter - See the Admin Guide.\n\nStorage engine:\n  ssd - B-Tree storage "
+        "engine optimized for solid state disks.\n  memory - Durable in-memory storage engine for small "
+        "datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. The proxy role is being "
+        "deprecated and split into GRV proxy and Commit proxy, now prefer configure 'grv_proxies' and 'commit_proxies' "
+        "separately. Generally we should follow that 'commit_proxies' is three times of 'grv_proxies' and "
+        "'grv_proxies' "
+        "should be not more than 4. If 'proxies' is specified, it will be converted to 'grv_proxies' and "
+        "'commit_proxies'. "
+        "Must be at least 2 (1 GRV proxy, 1 Commit proxy), or set to -1 which restores the number of proxies to the "
+        "default value.\n\ncommit_proxies=<COMMIT_PROXIES>: Sets the desired number of commit proxies in the cluster. "
+        "Must be at least 1, or set to -1 which restores the number of commit proxies to the default "
+        "value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the desired number of GRV proxies in the cluster. Must be at least "
+        "1, or set to -1 which restores the number of GRV proxies to the default value.\n\nlogs=<LOGS>: Sets the "
+        "desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of "
+        "logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. "
+        "Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\n"
+        "perpetual_storage_wiggle=<WIGGLE_SPEED>: Set the value speed (a.k.a., the number of processes that the Data "
+        "Distributor should wiggle at a time). Currently, only 0 and 1 are supported. The value 0 means to disable the "
+        "perpetual storage wiggle.\n\n"
+        "perpetual_storage_wiggle_locality=<<LOCALITY_KEY>:<LOCALITY_VALUE>|0>: Set the process filter for wiggling. "
+        "The processes that match the given locality key and locality value are only wiggled. The value 0 will disable "
+        "the locality filter and matches all the processes for wiggling.\n\n"
+        "See the FoundationDB Administration Guide for more information."));

+} // namespace fdb_cli
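Once the interactive `auto` path has (optionally) gathered advice, the whole command reduces to a single changeConfig() call. A rough sketch (not part of the diff) of that non-interactive path, inside a flow ACTOR, with illustrative tokens for `configure triple ssd`; passing a default-constructed Optional for the auto result is an assumption mirroring the non-auto branch above:

    std::vector<StringRef> modeTokens = { LiteralStringRef("triple"), LiteralStringRef("ssd") };
    // force=false keeps the safety checks enabled, matching `configure` without FORCE.
    ConfigurationResult r =
        wait(ManagementAPI::changeConfig(db, modeTokens, Optional<ConfigureAutoResult>(), false));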
@@ -32,19 +32,25 @@ namespace fdb_cli {

 const KeyRef consistencyCheckSpecialKey = LiteralStringRef("\xff\xff/management/consistency_check_suspended");

-ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens) {
+ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
+                                                std::vector<StringRef> tokens,
+                                                bool intrans) {
 	// Here we do not proceed in a try-catch loop since the transaction is always supposed to succeed.
 	// If not, the outer loop catch block(fdbcli.actor.cpp) will handle the error and print out the error message
 	tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
 	if (tokens.size() == 1) {
-		Optional<Value> suspended = wait(safeThreadFutureToFuture(tr->get(consistencyCheckSpecialKey)));
+		// hold the returned standalone object's memory
+		state ThreadFuture<Optional<Value>> suspendedF = tr->get(consistencyCheckSpecialKey);
+		Optional<Value> suspended = wait(safeThreadFutureToFuture(suspendedF));
 		printf("ConsistencyCheck is %s\n", suspended.present() ? "off" : "on");
 	} else if (tokens.size() == 2 && tokencmp(tokens[1], "off")) {
 		tr->set(consistencyCheckSpecialKey, Value());
-		wait(safeThreadFutureToFuture(tr->commit()));
+		if (!intrans)
+			wait(safeThreadFutureToFuture(tr->commit()));
 	} else if (tokens.size() == 2 && tokencmp(tokens[1], "on")) {
 		tr->clear(consistencyCheckSpecialKey);
-		wait(safeThreadFutureToFuture(tr->commit()));
+		if (!intrans)
+			wait(safeThreadFutureToFuture(tr->commit()));
 	} else {
 		printUsage(tokens[0]);
 		return false;
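Besides the new `intrans` flag (which defers the commit to the enclosing multi-statement transaction), this hunk switches to the memory-holding pattern used throughout the change set: the ThreadFuture is kept in a `state` variable so the Standalone result's arena stays alive across the wait(). A minimal sketch, assuming flow ACTOR context and an illustrative key name:

    // someKey is hypothetical; the point is that valueF, not a temporary,
    // owns the returned standalone's memory until after the wait().
    state ThreadFuture<Optional<Value>> valueF = tr->get(someKey);
    Optional<Value> value = wait(safeThreadFutureToFuture(valueF));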
@@ -0,0 +1,185 @@
+/*
+ * CoordinatorsCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "boost/algorithm/string.hpp"
+
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/Knobs.h"
+#include "fdbclient/Schemas.h"
+#include "fdbclient/ManagementAPI.actor.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace {
+
+ACTOR Future<Void> printCoordinatorsInfo(Reference<IDatabase> db) {
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		try {
+			// Hold the reference to the standalone's memory
+			state ThreadFuture<Optional<Value>> descriptionF = tr->get(fdb_cli::clusterDescriptionSpecialKey);
+			Optional<Value> description = wait(safeThreadFutureToFuture(descriptionF));
+			ASSERT(description.present());
+			printf("Cluster description: %s\n", description.get().toString().c_str());
+			// Hold the reference to the standalone's memory
+			state ThreadFuture<Optional<Value>> processesF = tr->get(fdb_cli::coordinatorsProcessSpecialKey);
+			Optional<Value> processes = wait(safeThreadFutureToFuture(processesF));
+			ASSERT(processes.present());
+			std::vector<std::string> process_addresses;
+			boost::split(process_addresses, processes.get().toString(), [](char c) { return c == ','; });
+			printf("Cluster coordinators (%zu): %s\n", process_addresses.size(), processes.get().toString().c_str());
+			printf("Type `help coordinators' to learn how to change this information.\n");
+			return Void();
+		} catch (Error& e) {
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+}
+
+ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<StringRef> tokens) {
+	state int retries = 0;
+	state int notEnoughMachineResults = 0;
+	state StringRef new_cluster_description;
+	state std::string auto_coordinators_str;
+	StringRef nameTokenBegin = LiteralStringRef("description=");
+	for (auto tok = tokens.begin() + 1; tok != tokens.end(); ++tok)
+		if (tok->startsWith(nameTokenBegin)) {
+			new_cluster_description = tok->substr(nameTokenBegin.size());
+			std::copy(tok + 1, tokens.end(), tok);
+			tokens.resize(tokens.size() - 1);
+			break;
+		}
+
+	state bool automatic = tokens.size() == 2 && tokens[1] == LiteralStringRef("auto");
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
+		try {
+			// update cluster description
+			if (new_cluster_description.size()) {
+				tr->set(fdb_cli::clusterDescriptionSpecialKey, new_cluster_description);
+			}
+			// if auto change, read the special key to retrieve the recommended config
+			if (automatic) {
+				// if previous read failed, retry, otherwise, use the same recommended config
+				if (!auto_coordinators_str.size()) {
+					// Hold the reference to the standalone's memory
+					state ThreadFuture<Optional<Value>> auto_coordinatorsF =
+					    tr->get(fdb_cli::coordinatorsAutoSpecialKey);
+					Optional<Value> auto_coordinators = wait(safeThreadFutureToFuture(auto_coordinatorsF));
+					ASSERT(auto_coordinators.present());
+					auto_coordinators_str = auto_coordinators.get().toString();
+				}
+				tr->set(fdb_cli::coordinatorsProcessSpecialKey, auto_coordinators_str);
+			} else if (tokens.size() > 1) {
+				state std::set<NetworkAddress> new_coordinators_addresses;
+				state std::vector<std::string> newAddresslist;
+				state std::vector<StringRef>::iterator t;
+				for (t = tokens.begin() + 1; t != tokens.end(); ++t) {
+					try {
+						auto const& addr = NetworkAddress::parse(t->toString());
+						if (new_coordinators_addresses.count(addr)) {
+							fprintf(stderr, "ERROR: passed redundant coordinators: `%s'\n", addr.toString().c_str());
+							return true;
+						}
+						new_coordinators_addresses.insert(addr);
+						newAddresslist.push_back(addr.toString());
+					} catch (Error& e) {
+						if (e.code() == error_code_connection_string_invalid) {
+							fprintf(
+							    stderr, "ERROR: '%s' is not a valid network endpoint address\n", t->toString().c_str());
+							return true;
+						}
+						throw;
+					}
+				}
+				std::string new_addresses_str = boost::algorithm::join(newAddresslist, ", ");
+				tr->set(fdb_cli::coordinatorsProcessSpecialKey, new_addresses_str);
+			}
+			wait(safeThreadFutureToFuture(tr->commit()));
+			// commit should always fail here
+			// if coordinators are changed, we should get commit_unknown() error
+			ASSERT(false);
+		} catch (Error& e) {
+			state Error err(e);
+			if (e.code() == error_code_special_keys_api_failure) {
+				std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
+				if (errorMsgStr == ManagementAPI::generateErrorMessage(CoordinatorsResult::NOT_ENOUGH_MACHINES) &&
+				    notEnoughMachineResults < 1) {
+					// we could get not_enough_machines if we happen to see the database while the cluster controller is
+					// updating the worker list, so make sure it happens twice before returning a failure
+					notEnoughMachineResults++;
+					wait(delay(1.0));
+					tr->reset();
+					continue;
+				} else if (errorMsgStr ==
+				           ManagementAPI::generateErrorMessage(CoordinatorsResult::SAME_NETWORK_ADDRESSES)) {
+					if (retries)
+						printf("Coordination state changed\n");
+					else
+						printf("No change (existing configuration satisfies request)\n");
+					return true;
+				} else {
+					fprintf(stderr, "ERROR: %s\n", errorMsgStr.c_str());
+					return false;
+				}
+			}
+			wait(safeThreadFutureToFuture(tr->onError(err)));
+			++retries;
+		}
+	}
+}
+
+} // namespace
+
+namespace fdb_cli {
+
+const KeyRef clusterDescriptionSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/cluster_description");
+const KeyRef coordinatorsAutoSpecialKey = LiteralStringRef("\xff\xff/management/auto_coordinators");
+const KeyRef coordinatorsProcessSpecialKey = LiteralStringRef("\xff\xff/configuration/coordinators/processes");
+
+ACTOR Future<bool> coordinatorsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
+	if (tokens.size() < 2) {
+		wait(printCoordinatorsInfo(db));
+		return true;
+	} else {
+		bool result = wait(changeCoordinators(db, tokens));
+		return result;
+	}
+}
+
+CommandFactory coordinatorsFactory(
+    "coordinators",
+    CommandHelp(
+        "coordinators auto|<ADDRESS>+ [description=new_cluster_description]",
+        "change cluster coordinators or description",
+        "If 'auto' is specified, coordinator addresses will be chosen automatically to support the configured "
+        "redundancy level. (If the current set of coordinators are healthy and already support the redundancy level, "
+        "nothing will be changed.)\n\nOtherwise, sets the coordinators to the list of IP:port pairs specified by "
+        "<ADDRESS>+. An fdbserver process must be running on each of the specified addresses.\n\ne.g. coordinators "
+        "10.0.0.1:4000 10.0.0.2:4000 10.0.0.3:4000\n\nIf 'description=desc' is specified then the description field in "
+        "the cluster\nfile is changed to desc, which must match [A-Za-z0-9_]+."));
+
+} // namespace fdb_cli
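One subtlety worth calling out: writing the coordinators special keys is never expected to commit cleanly, so success is detected on the error path. A minimal sketch of that control flow (not part of the diff), assuming a flow ACTOR with `tr` set up as above and an illustrative newAddressesStr:

    try {
        tr->set(fdb_cli::coordinatorsProcessSpecialKey, newAddressesStr);
        wait(safeThreadFutureToFuture(tr->commit()));
        ASSERT(false); // an accepted coordinator change surfaces as an error, never a clean commit
    } catch (Error& e) {
        state Error err(e);
        if (e.code() == error_code_special_keys_api_failure) {
            // inspect the message, comparing against ManagementAPI::generateErrorMessage(...)
            std::string msg = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
        }
        wait(safeThreadFutureToFuture(tr->onError(err)));
    }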
@@ -0,0 +1,137 @@
+/*
+ * DataDistributionCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "boost/lexical_cast.hpp"
+
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBTypes.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/Knobs.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace {
+
+ACTOR Future<Void> setDDMode(Reference<IDatabase> db, int mode) {
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
+		try {
+			tr->set(fdb_cli::ddModeSpecialKey, boost::lexical_cast<std::string>(mode));
+			if (mode) {
+				// set DDMode to 1 will enable all disabled parts, for instance the SS failure monitors.
+				// hold the returned standalone object's memory
+				state ThreadFuture<RangeResult> resultFuture =
+				    tr->getRange(fdb_cli::maintenanceSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+				RangeResult res = wait(safeThreadFutureToFuture(resultFuture));
+				ASSERT(res.size() <= 1);
+				if (res.size() == 1 && res[0].key == fdb_cli::ignoreSSFailureSpecialKey) {
+					// only clear the key if it is currently being used to disable all SS failure data movement
+					tr->clear(fdb_cli::maintenanceSpecialKeyRange);
+				}
+				tr->clear(fdb_cli::ddIgnoreRebalanceSpecialKey);
+			}
+			wait(safeThreadFutureToFuture(tr->commit()));
+			return Void();
+		} catch (Error& e) {
+			TraceEvent("SetDDModeRetrying").error(e);
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+}
+
+ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Reference<IDatabase> db, bool ignoreRebalance) {
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
+		try {
+			if (ignoreRebalance) {
+				tr->set(fdb_cli::ddIgnoreRebalanceSpecialKey, ValueRef());
+			} else {
+				tr->clear(fdb_cli::ddIgnoreRebalanceSpecialKey);
+			}
+			wait(safeThreadFutureToFuture(tr->commit()));
+			return Void();
+		} catch (Error& e) {
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+}
+
+} // namespace
+
+namespace fdb_cli {
+
+const KeyRef ddModeSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/mode");
+const KeyRef ddIgnoreRebalanceSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/rebalance_ignored");
+
+ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
+	state bool result = true;
+	if (tokens.size() != 2 && tokens.size() != 3) {
+		printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
+		       "<ssfailure|rebalance>>\n");
+		result = false;
+	} else {
+		if (tokencmp(tokens[1], "on")) {
+			wait(success(setDDMode(db, 1)));
+			printf("Data distribution is turned on.\n");
+		} else if (tokencmp(tokens[1], "off")) {
+			wait(success(setDDMode(db, 0)));
+			printf("Data distribution is turned off.\n");
+		} else if (tokencmp(tokens[1], "disable")) {
+			if (tokencmp(tokens[2], "ssfailure")) {
+				wait(success((setHealthyZone(db, LiteralStringRef("IgnoreSSFailures"), 0))));
+				printf("Data distribution is disabled for storage server failures.\n");
+			} else if (tokencmp(tokens[2], "rebalance")) {
+				wait(setDDIgnoreRebalanceSwitch(db, true));
+				printf("Data distribution is disabled for rebalance.\n");
+			} else {
+				printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
+				       "<ssfailure|rebalance>>\n");
+				result = false;
+			}
+		} else if (tokencmp(tokens[1], "enable")) {
+			if (tokencmp(tokens[2], "ssfailure")) {
+				wait(success((clearHealthyZone(db, false, true))));
+				printf("Data distribution is enabled for storage server failures.\n");
+			} else if (tokencmp(tokens[2], "rebalance")) {
+				wait(setDDIgnoreRebalanceSwitch(db, false));
+				printf("Data distribution is enabled for rebalance.\n");
+			} else {
+				printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
+				       "<ssfailure|rebalance>>\n");
+				result = false;
+			}
+		} else {
+			printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
+			       "<ssfailure|rebalance>>\n");
+			result = false;
+		}
+	}
+	return result;
+}
+
+// hidden commands, no help text for now
+CommandFactory dataDistributionFactory("datadistribution");
+} // namespace fdb_cli
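The file above routes everything through two special keys. As a rough sketch (not part of the diff) of what `datadistribution off` amounts to at the special-key level, mirroring the minimal path through setDDMode() and assuming flow ACTOR context:

    state Reference<ITransaction> tr = db->createTransaction();
    tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
    // mode "0" disables data distribution; "1" re-enables it (and, in setDDMode,
    // also clears the maintenance and rebalance-ignored keys).
    tr->set(LiteralStringRef("\xff\xff/management/data_distribution/mode"), LiteralStringRef("0"));
    wait(safeThreadFutureToFuture(tr->commit()));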
@@ -0,0 +1,397 @@
+/*
+ * ExcludeCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/Knobs.h"
+#include "fdbclient/ManagementAPI.actor.h"
+#include "fdbclient/Schemas.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace {
+
+// Exclude the given servers and localities
+ACTOR Future<bool> excludeServersAndLocalities(Reference<IDatabase> db,
+                                               std::vector<AddressExclusion> servers,
+                                               std::unordered_set<std::string> localities,
+                                               bool failed,
+                                               bool force) {
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
+		try {
+			if (force && servers.size())
+				tr->set(failed ? fdb_cli::failedForceOptionSpecialKey : fdb_cli::excludedForceOptionSpecialKey,
+				        ValueRef());
+			for (const auto& s : servers) {
+				Key addr = failed ? fdb_cli::failedServersSpecialKeyRange.begin.withSuffix(s.toString())
+				                  : fdb_cli::excludedServersSpecialKeyRange.begin.withSuffix(s.toString());
+				tr->set(addr, ValueRef());
+			}
+			if (force && localities.size())
+				tr->set(failed ? fdb_cli::failedLocalityForceOptionSpecialKey
+				               : fdb_cli::excludedLocalityForceOptionSpecialKey,
+				        ValueRef());
+			for (const auto& l : localities) {
+				Key addr = failed ? fdb_cli::failedLocalitySpecialKeyRange.begin.withSuffix(l)
+				                  : fdb_cli::excludedLocalitySpecialKeyRange.begin.withSuffix(l);
+				tr->set(addr, ValueRef());
+			}
+			wait(safeThreadFutureToFuture(tr->commit()));
+			return true;
+		} catch (Error& e) {
+			state Error err(e);
+			if (e.code() == error_code_special_keys_api_failure) {
+				std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
+				// last character is \n
+				auto pos = errorMsgStr.find_last_of("\n", errorMsgStr.size() - 2);
+				auto last_line = errorMsgStr.substr(pos + 1);
+				// customized the error message for fdbcli
+				fprintf(stderr,
+				        "%s\n%s\n",
+				        errorMsgStr.substr(0, pos).c_str(),
+				        last_line.find("free space") != std::string::npos
+				            ? "Type `exclude FORCE <ADDRESS...>' to exclude without checking free space."
+				            : "Type `exclude FORCE failed <ADDRESS...>' to exclude without performing safety checks.");
+				return false;
+			}
+			wait(safeThreadFutureToFuture(tr->onError(err)));
+		}
+	}
+}
+
+ACTOR Future<std::vector<std::string>> getExcludedServers(Reference<IDatabase> db) {
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		try {
+			state ThreadFuture<RangeResult> resultFuture =
+			    tr->getRange(fdb_cli::excludedServersSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+			state RangeResult r = wait(safeThreadFutureToFuture(resultFuture));
+			ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY);
+			state ThreadFuture<RangeResult> resultFuture2 =
+			    tr->getRange(fdb_cli::failedServersSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+			state RangeResult r2 = wait(safeThreadFutureToFuture(resultFuture2));
+			ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);
+
+			std::vector<std::string> exclusions;
+			for (const auto& i : r) {
+				auto addr = i.key.removePrefix(fdb_cli::excludedServersSpecialKeyRange.begin).toString();
+				exclusions.push_back(addr);
+			}
+			for (const auto& i : r2) {
+				auto addr = i.key.removePrefix(fdb_cli::failedServersSpecialKeyRange.begin).toString();
+				exclusions.push_back(addr);
+			}
+			return exclusions;
+		} catch (Error& e) {
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+}
+
+// Get the list of excluded localities by reading the keys.
+ACTOR Future<std::vector<std::string>> getExcludedLocalities(Reference<IDatabase> db) {
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		try {
+			state ThreadFuture<RangeResult> resultFuture =
+			    tr->getRange(fdb_cli::excludedLocalitySpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+			state RangeResult r = wait(safeThreadFutureToFuture(resultFuture));
+			ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY);
+			state ThreadFuture<RangeResult> resultFuture2 =
+			    tr->getRange(fdb_cli::failedLocalitySpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+			state RangeResult r2 = wait(safeThreadFutureToFuture(resultFuture2));
+			ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);
+
+			std::vector<std::string> excludedLocalities;
+			for (const auto& i : r) {
+				auto locality = i.key.removePrefix(fdb_cli::excludedLocalitySpecialKeyRange.begin).toString();
+				excludedLocalities.push_back(locality);
+			}
+			for (const auto& i : r2) {
+				auto locality = i.key.removePrefix(fdb_cli::failedLocalitySpecialKeyRange.begin).toString();
+				excludedLocalities.push_back(locality);
+			}
+			return excludedLocalities;
+		} catch (Error& e) {
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+}
+
+ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Reference<IDatabase> db,
+                                                                std::vector<AddressExclusion> excl,
+                                                                bool waitForAllExcluded) {
+	state std::set<AddressExclusion> exclusions(excl.begin(), excl.end());
+	state std::set<NetworkAddress> inProgressExclusion;
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		inProgressExclusion.clear();
+		try {
+			state ThreadFuture<RangeResult> resultFuture =
+			    tr->getRange(fdb_cli::exclusionInProgressSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+			RangeResult exclusionInProgress = wait(safeThreadFutureToFuture(resultFuture));
+			ASSERT(!exclusionInProgress.more && exclusionInProgress.size() < CLIENT_KNOBS->TOO_MANY);
+			if (exclusionInProgress.empty())
+				return inProgressExclusion;
+			for (const auto& addr : exclusionInProgress)
+				inProgressExclusion.insert(NetworkAddress::parse(
+				    addr.key.removePrefix(fdb_cli::exclusionInProgressSpecialKeyRange.begin).toString()));
+			if (!waitForAllExcluded)
+				break;

+			wait(delayJittered(1.0)); // SOMEDAY: watches!
+		} catch (Error& e) {
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+	return inProgressExclusion;
+}
+
+ACTOR Future<Void> checkForCoordinators(Reference<IDatabase> db, std::vector<AddressExclusion> exclusionVector) {
+
+	state bool foundCoordinator = false;
+	state std::vector<NetworkAddress> coordinatorList;
+	state Reference<ITransaction> tr = db->createTransaction();
+	loop {
+		try {
+			// Hold the reference to the standalone's memory
+			state ThreadFuture<Optional<Value>> coordinatorsF = tr->get(fdb_cli::coordinatorsProcessSpecialKey);
+			Optional<Value> coordinators = wait(safeThreadFutureToFuture(coordinatorsF));
+			ASSERT(coordinators.present());
+			coordinatorList = NetworkAddress::parseList(coordinators.get().toString());
+			break;
+		} catch (Error& e) {
+			wait(safeThreadFutureToFuture(tr->onError(e)));
+		}
+	}
+	for (const auto& c : coordinatorList) {
+		if (std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip, c.port)) ||
+		    std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip))) {
+			fprintf(stderr, "WARNING: %s is a coordinator!\n", c.toString().c_str());
+			foundCoordinator = true;
+		}
+	}
+	if (foundCoordinator)
+		printf("Type `help coordinators' for information on how to change the\n"
+		       "cluster's coordination servers before removing them.\n");
+	return Void();
+}
+
+} // namespace
+
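The public command actor that follows leans on AddressExclusion to tell machine-wide exclusions from single-process ones. A brief sketch (not part of the diff) of the two accepted forms, with illustrative addresses:

    // A bare IP excludes every process on that machine; IP:PORT excludes one process.
    auto machine = AddressExclusion::parse(LiteralStringRef("10.0.0.1"));
    auto process = AddressExclusion::parse(LiteralStringRef("10.0.0.1:4500"));
    ASSERT(machine.isValid() && process.isValid());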
+namespace fdb_cli {
+
+const KeyRangeRef excludedServersSpecialKeyRange(LiteralStringRef("\xff\xff/management/excluded/"),
+                                                 LiteralStringRef("\xff\xff/management/excluded0"));
+const KeyRangeRef failedServersSpecialKeyRange(LiteralStringRef("\xff\xff/management/failed/"),
+                                               LiteralStringRef("\xff\xff/management/failed0"));
+const KeyRangeRef excludedLocalitySpecialKeyRange(LiteralStringRef("\xff\xff/management/excluded_locality/"),
+                                                  LiteralStringRef("\xff\xff/management/excluded_locality0"));
+const KeyRangeRef failedLocalitySpecialKeyRange(LiteralStringRef("\xff\xff/management/failed_locality/"),
+                                                LiteralStringRef("\xff\xff/management/failed_locality0"));
+const KeyRef excludedForceOptionSpecialKey = LiteralStringRef("\xff\xff/management/options/excluded/force");
+const KeyRef failedForceOptionSpecialKey = LiteralStringRef("\xff\xff/management/options/failed/force");
+const KeyRef excludedLocalityForceOptionSpecialKey =
+    LiteralStringRef("\xff\xff/management/options/excluded_locality/force");
+const KeyRef failedLocalityForceOptionSpecialKey =
+    LiteralStringRef("\xff\xff/management/options/failed_locality/force");
+const KeyRangeRef exclusionInProgressSpecialKeyRange(LiteralStringRef("\xff\xff/management/in_progress_exclusion/"),
+                                                     LiteralStringRef("\xff\xff/management/in_progress_exclusion0"));
+
+ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens, Future<Void> warn) {
+	if (tokens.size() <= 1) {
+		state std::vector<std::string> excludedAddresses = wait(getExcludedServers(db));
+		state std::vector<std::string> excludedLocalities = wait(getExcludedLocalities(db));
+
+		if (!excludedAddresses.size() && !excludedLocalities.size()) {
+			printf("There are currently no servers or localities excluded from the database.\n"
+			       "To learn how to exclude a server, type `help exclude'.\n");
+			return true;
+		}
+
+		printf("There are currently %zu servers or localities being excluded from the database:\n",
+		       excludedAddresses.size() + excludedLocalities.size());
+		for (const auto& e : excludedAddresses)
+			printf("  %s\n", e.c_str());
+		for (const auto& e : excludedLocalities)
+			printf("  %s\n", e.c_str());
+
+		printf("To find out whether it is safe to remove one or more of these\n"
+		       "servers from the cluster, type `exclude <addresses>'.\n"
+		       "To return one of these servers to the cluster, type `include <addresses>'.\n");
+
+		return true;
+	} else {
+		state std::vector<AddressExclusion> exclusionVector;
+		state std::set<AddressExclusion> exclusionSet;
+		state std::vector<AddressExclusion> exclusionAddresses;
+		state std::unordered_set<std::string> exclusionLocalities;
+		state std::vector<std::string> noMatchLocalities;
+		state bool force = false;
+		state bool waitForAllExcluded = true;
+		state bool markFailed = false;
+		state std::vector<ProcessData> workers;
+		bool result = wait(fdb_cli::getWorkers(db, &workers));
+		if (!result)
+			return false;
+		for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
+			if (*t == LiteralStringRef("FORCE")) {
+				force = true;
+			} else if (*t == LiteralStringRef("no_wait")) {
+				waitForAllExcluded = false;
+			} else if (*t == LiteralStringRef("failed")) {
+				markFailed = true;
+			} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) &&
+			           t->toString().find(':') != std::string::npos) {
+				std::set<AddressExclusion> localityAddresses = getAddressesByLocality(workers, t->toString());
+				if (localityAddresses.empty()) {
+					noMatchLocalities.push_back(t->toString());
+				} else {
+					// add all the server ipaddresses that belong to the given localities to the exclusionSet.
+					exclusionVector.insert(exclusionVector.end(), localityAddresses.begin(), localityAddresses.end());
+					exclusionSet.insert(localityAddresses.begin(), localityAddresses.end());
+				}
+				exclusionLocalities.insert(t->toString());
+			} else {
+				auto a = AddressExclusion::parse(*t);
+				if (!a.isValid()) {
+					fprintf(stderr,
+					        "ERROR: '%s' is neither a valid network endpoint address nor a locality\n",
+					        t->toString().c_str());
+					if (t->toString().find(":tls") != std::string::npos)
+						printf("        Do not include the `:tls' suffix when naming a process\n");
+					return true;
+				}
+				exclusionVector.push_back(a);
+				exclusionSet.insert(a);
+				exclusionAddresses.push_back(a);
+			}
+		}
+
+		if (exclusionAddresses.empty() && exclusionLocalities.empty()) {
+			fprintf(stderr, "ERROR: At least one valid network endpoint address or a locality is not provided\n");
+			return false;
+		}
+
+		bool res = wait(excludeServersAndLocalities(db, exclusionAddresses, exclusionLocalities, markFailed, force));
+		if (!res)
+			return false;
+
+		if (waitForAllExcluded) {
+			printf("Waiting for state to be removed from all excluded servers. This may take a while.\n");
|
||||||
|
printf("(Interrupting this wait with CTRL+C will not cancel the data movement.)\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (warn.isValid())
|
||||||
|
warn.cancel();
|
||||||
|
|
||||||
|
state std::set<NetworkAddress> notExcludedServers =
|
||||||
|
wait(checkForExcludingServers(db, exclusionVector, waitForAllExcluded));
|
||||||
|
std::map<IPAddress, std::set<uint16_t>> workerPorts;
|
||||||
|
for (auto addr : workers)
|
||||||
|
workerPorts[addr.address.ip].insert(addr.address.port);
|
||||||
|
|
||||||
|
// Print a list of all excluded addresses that don't have a corresponding worker
|
||||||
|
std::set<AddressExclusion> absentExclusions;
|
||||||
|
for (const auto& addr : exclusionVector) {
|
||||||
|
auto worker = workerPorts.find(addr.ip);
|
||||||
|
if (worker == workerPorts.end())
|
||||||
|
absentExclusions.insert(addr);
|
||||||
|
else if (addr.port > 0 && worker->second.count(addr.port) == 0)
|
||||||
|
absentExclusions.insert(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const auto& exclusion : exclusionVector) {
|
||||||
|
if (absentExclusions.find(exclusion) != absentExclusions.end()) {
|
||||||
|
if (exclusion.port == 0) {
|
||||||
|
fprintf(stderr,
|
||||||
|
" %s(Whole machine) ---- WARNING: Missing from cluster!Be sure that you excluded the "
|
||||||
|
"correct machines before removing them from the cluster!\n",
|
||||||
|
exclusion.ip.toString().c_str());
|
||||||
|
} else {
|
||||||
|
fprintf(stderr,
|
||||||
|
" %s ---- WARNING: Missing from cluster! Be sure that you excluded the correct processes "
|
||||||
|
"before removing them from the cluster!\n",
|
||||||
|
exclusion.toString().c_str());
|
||||||
|
}
|
||||||
|
} else if (std::any_of(notExcludedServers.begin(), notExcludedServers.end(), [&](const NetworkAddress& a) {
|
||||||
|
return addressExcluded({ exclusion }, a);
|
||||||
|
})) {
|
||||||
|
if (exclusion.port == 0) {
|
||||||
|
fprintf(stderr,
|
||||||
|
" %s(Whole machine) ---- WARNING: Exclusion in progress! It is not safe to remove this "
|
||||||
|
"machine from the cluster\n",
|
||||||
|
exclusion.ip.toString().c_str());
|
||||||
|
} else {
|
||||||
|
fprintf(stderr,
|
||||||
|
" %s ---- WARNING: Exclusion in progress! It is not safe to remove this process from the "
|
||||||
|
"cluster\n",
|
||||||
|
exclusion.toString().c_str());
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (exclusion.port == 0) {
|
||||||
|
printf(" %s(Whole machine) ---- Successfully excluded. It is now safe to remove this machine "
|
||||||
|
"from the cluster.\n",
|
||||||
|
exclusion.ip.toString().c_str());
|
||||||
|
} else {
|
||||||
|
printf(
|
||||||
|
" %s ---- Successfully excluded. It is now safe to remove this process from the cluster.\n",
|
||||||
|
exclusion.toString().c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const auto& locality : noMatchLocalities) {
|
||||||
|
fprintf(
|
||||||
|
stderr,
|
||||||
|
" %s ---- WARNING: Currently no servers found with this locality match! Be sure that you excluded "
|
||||||
|
"the correct locality.\n",
|
||||||
|
locality.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
wait(checkForCoordinators(db, exclusionVector));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
CommandFactory excludeFactory(
|
||||||
|
"exclude",
|
||||||
|
CommandHelp(
|
||||||
|
"exclude [FORCE] [failed] [no_wait] [<ADDRESS...>] [locality_dcid:<excludedcid>] "
|
||||||
|
"[locality_zoneid:<excludezoneid>] [locality_machineid:<excludemachineid>] "
|
||||||
|
"[locality_processid:<excludeprocessid>] or any locality data",
|
||||||
|
"exclude servers from the database either with IP address match or locality match",
|
||||||
|
"If no addresses or locaities are specified, lists the set of excluded addresses and localities."
|
||||||
|
"\n\nFor each IP address or IP:port pair in <ADDRESS...> or any LocalityData attributes (like dcid, "
|
||||||
|
"zoneid, "
|
||||||
|
"machineid, processid), adds the address/locality to the set of excluded servers and localities then waits "
|
||||||
|
"until all database state has been safely moved away from the specified servers. If 'no_wait' is set, the "
|
||||||
|
"command returns \nimmediately without checking if the exclusions have completed successfully.\n"
|
||||||
|
"If 'FORCE' is set, the command does not perform safety checks before excluding.\n"
|
||||||
|
"If 'failed' is set, the transaction log queue is dropped pre-emptively before waiting\n"
|
||||||
|
"for data movement to finish and the server cannot be included again."));
|
||||||
|
} // namespace fdb_cli
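For reference, `excludeServersAndLocalities' lives in ManagementAPI and is not part of this hunk; the following is only a rough, hypothetical sketch of the kind of special-key write it performs, with the key layout taken from the ranges declared above (the real helper also records a version key and handles the force options):

// Hypothetical sketch only, not the actual ManagementAPI helper.
ACTOR Future<Void> writeOneExclusionSketch(Reference<IDatabase> db, AddressExclusion addr, bool markFailed) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            // e.g. "\xff\xff/management/excluded/1.2.3.4:4500", or .../failed/ when markFailed
            Key key = markFailed ? fdb_cli::failedServersSpecialKeyRange.begin.withSuffix(addr.toString())
                                 : fdb_cli::excludedServersSpecialKeyRange.begin.withSuffix(addr.toString());
            tr->set(key, ValueRef());
            wait(safeThreadFutureToFuture(tr->commit()));
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}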

@ -0,0 +1,101 @@
/*
 * ExpensiveDataCheckCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

// The command is used to send a data check request to the specified process.
// The check request is accomplished by rebooting the process.

ACTOR Future<bool> expensiveDataCheckCommandActor(
    Reference<IDatabase> db,
    Reference<ITransaction> tr,
    std::vector<StringRef> tokens,
    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    state bool result = true;
    if (tokens.size() == 1) {
        // initialize worker interfaces
        wait(getWorkerInterfaces(tr, address_interface));
    }
    if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
        if (address_interface->size() == 0) {
            printf("\nNo addresses can be checked.\n");
        } else if (address_interface->size() == 1) {
            printf("\nThe following address can be checked:\n");
        } else {
            printf("\nThe following %zu addresses can be checked:\n", address_interface->size());
        }
        for (auto it : *address_interface) {
            printf("%s\n", printable(it.first).c_str());
        }
        printf("\n");
    } else if (tokencmp(tokens[1], "all")) {
        state std::map<Key, std::pair<Value, ClientLeaderRegInterface>>::const_iterator it;
        for (it = address_interface->cbegin(); it != address_interface->cend(); it++) {
            int64_t checkRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(it->first, true, 0)));
            if (!checkRequestSent) {
                result = false;
                fprintf(stderr, "ERROR: failed to send request to check process `%s'.\n", it->first.toString().c_str());
            }
        }
        if (address_interface->size() == 0) {
            fprintf(stderr,
                    "ERROR: no processes to check. You must run the `expensive_data_check' "
                    "command before running `expensive_data_check all'.\n");
        } else {
            printf("Attempted to kill and check %zu processes\n", address_interface->size());
        }
    } else {
        state int i;
        for (i = 1; i < tokens.size(); i++) {
            if (!address_interface->count(tokens[i])) {
                fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
                result = false;
                break;
            }
        }

        if (result) {
            for (i = 1; i < tokens.size(); i++) {
                int64_t checkRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], true, 0)));
                if (!checkRequestSent) {
                    result = false;
                    fprintf(
                        stderr, "ERROR: failed to send request to check process `%s'.\n", tokens[i].toString().c_str());
                }
            }
            printf("Attempted to kill and check %zu processes\n", tokens.size() - 1);
        }
    }
    return result;
}
// hidden commands, no help text for now
CommandFactory expensiveDataCheckFactory("expensive_data_check");
} // namespace fdb_cli
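The same IDatabase::rebootWorker call underlies kill, suspend, and this command; the reading below is inferred from the call sites in this change rather than from a documented signature:

// Inferred from usage in this diff: rebootWorker(address, performDataCheck, suspendSeconds)
//   db->rebootWorker(addr, false, 0);   // kill: plain reboot (KillCommand.actor.cpp)
//   db->rebootWorker(addr, true, 0);    // expensive_data_check: reboot plus data check (above)
//   db->rebootWorker(addr, false, 10);  // suspend: stay down ~10 seconds (SuspendCommand.actor.cpp)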

@ -0,0 +1,181 @@
/*
 * FileConfigureCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/FlowLineNoise.h"
#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/Schemas.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
                                             std::string filePath,
                                             bool isNewDatabase,
                                             bool force) {
    std::string contents(readFileBytes(filePath, 100000));
    json_spirit::mValue config;
    if (!json_spirit::read_string(contents, config)) {
        fprintf(stderr, "ERROR: Invalid JSON\n");
        return false;
    }
    if (config.type() != json_spirit::obj_type) {
        fprintf(stderr, "ERROR: Configuration file must contain a JSON object\n");
        return false;
    }
    StatusObject configJSON = config.get_obj();

    json_spirit::mValue schema;
    if (!json_spirit::read_string(JSONSchemas::clusterConfigurationSchema.toString(), schema)) {
        ASSERT(false);
    }

    std::string errorStr;
    if (!schemaMatch(schema.get_obj(), configJSON, errorStr)) {
        printf("%s", errorStr.c_str());
        return false;
    }

    std::string configString;
    if (isNewDatabase) {
        configString = "new";
    }

    for (const auto& [name, value] : configJSON) {
        if (!configString.empty()) {
            configString += " ";
        }
        if (value.type() == json_spirit::int_type) {
            configString += name + ":=" + format("%d", value.get_int());
        } else if (value.type() == json_spirit::str_type) {
            configString += value.get_str();
        } else if (value.type() == json_spirit::array_type) {
            configString +=
                name + "=" +
                json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
        } else {
            printUsage(LiteralStringRef("fileconfigure"));
            return false;
        }
    }
    ConfigurationResult result = wait(ManagementAPI::changeConfig(db, configString, force));
    // Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
    // there are various results specific to changeConfig() that we need to report:
    bool ret = true;
    switch (result) {
    case ConfigurationResult::NO_OPTIONS_PROVIDED:
        fprintf(stderr, "ERROR: No options provided\n");
        ret = false;
        break;
    case ConfigurationResult::CONFLICTING_OPTIONS:
        fprintf(stderr, "ERROR: Conflicting options\n");
        ret = false;
        break;
    case ConfigurationResult::UNKNOWN_OPTION:
        fprintf(stderr, "ERROR: Unknown option\n"); // This should not be possible because of schema match
        ret = false;
        break;
    case ConfigurationResult::INCOMPLETE_CONFIGURATION:
        fprintf(stderr,
                "ERROR: Must specify both a replication level and a storage engine when creating a new database\n");
        ret = false;
        break;
    case ConfigurationResult::INVALID_CONFIGURATION:
        fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
        ret = false;
        break;
    case ConfigurationResult::DATABASE_ALREADY_CREATED:
        fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
        ret = false;
        break;
    case ConfigurationResult::DATABASE_CREATED:
        printf("Database created\n");
        break;
    case ConfigurationResult::DATABASE_UNAVAILABLE:
        fprintf(stderr, "ERROR: The database is unavailable\n");
        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
        fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
        fprintf(stderr,
                "ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated "
                "before changing the configuration\n");
        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
        fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::REGIONS_CHANGED:
        fprintf(stderr,
                "ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::NOT_ENOUGH_WORKERS:
        fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::REGION_REPLICATION_MISMATCH:
        fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
        printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::DCID_MISSING:
        fprintf(stderr, "ERROR: No storage servers in one of the specified regions\n");
        printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
        ret = false;
        break;
    case ConfigurationResult::SUCCESS:
        printf("Configuration changed\n");
        break;
    default:
        ASSERT(false);
        ret = false;
    }
    return ret;
}

CommandFactory fileconfigureFactory(
    "fileconfigure",
    CommandHelp(
        "fileconfigure [new] <FILENAME>",
        "change the database configuration from a file",
        "The `new' option, if present, initializes a new database with the given configuration rather than changing "
        "the configuration of an existing one. Load a JSON document from the provided file, and change the database "
        "configuration to match the contents of the JSON document. The format should be the same as the value of the "
        "\"configuration\" entry in status JSON without \"excluded_servers\" or \"coordinators_count\"."));

} // namespace fdb_cli
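A small input file consistent with the parsing loop above might look as follows; the field names follow the "configuration" section of status JSON, and the values are illustrative:

// Illustrative fileconfigure input (hypothetical values):
//   { "redundancy_mode": "triple", "storage_engine": "ssd-2", "logs": 8 }
// The loop above turns this into the configure string "triple ssd-2 logs:=8":
// string values are appended bare, integers as name:=value, and arrays (for
// example "regions") as name=<json>.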

@ -0,0 +1,176 @@
/*
 * IncludeCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

// Remove the given localities from the exclusion list, i.e. include them
// again by clearing their special keys.
ACTOR Future<Void> includeLocalities(Reference<IDatabase> db,
                                     std::vector<std::string> localities,
                                     bool failed,
                                     bool includeAll) {
    state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            if (includeAll) {
                if (failed) {
                    tr->clear(fdb_cli::failedLocalitySpecialKeyRange);
                } else {
                    tr->clear(fdb_cli::excludedLocalitySpecialKeyRange);
                }
            } else {
                for (const auto& l : localities) {
                    Key locality = failed ? fdb_cli::failedLocalitySpecialKeyRange.begin.withSuffix(l)
                                          : fdb_cli::excludedLocalitySpecialKeyRange.begin.withSuffix(l);
                    tr->clear(locality);
                }
            }
            wait(safeThreadFutureToFuture(tr->commit()));
            return Void();
        } catch (Error& e) {
            TraceEvent("IncludeLocalitiesError").error(e, true);
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<Void> includeServers(Reference<IDatabase> db, std::vector<AddressExclusion> servers, bool failed) {
    state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            for (auto& s : servers) {
                // an invalid AddressExclusion means "include all": just clear the whole key range
                if (!s.isValid()) {
                    if (failed) {
                        tr->clear(fdb_cli::failedServersSpecialKeyRange);
                    } else {
                        tr->clear(fdb_cli::excludedServersSpecialKeyRange);
                    }
                } else {
                    Key addr = failed ? fdb_cli::failedServersSpecialKeyRange.begin.withSuffix(s.toString())
                                      : fdb_cli::excludedServersSpecialKeyRange.begin.withSuffix(s.toString());
                    tr->clear(addr);
                    // Eliminate both any ip-level exclusion (1.2.3.4) and any
                    // port-level exclusions (1.2.3.4:5).
                    // The range ['IP', 'IP;'] was originally deleted. ';' is
                    // char(':' + 1). This does not work, because for any digit
                    // x between 0 and 9, 'IPx' will also be in this range.
                    //
                    // This is why we now make two clears: the first only of the ip
                    // address, the second will delete all ports.
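                    // Worked example (hypothetical address): for s = 1.2.3.4 the
                    // single-key clear above removes ".../excluded/1.2.3.4", and the
                    // range clear below removes [".../1.2.3.4:", ".../1.2.3.4;"), i.e.
                    // every 1.2.3.4:<port> entry, while leaving a different machine
                    // such as 1.2.3.40 untouched ('0' < ':' in byte order, so
                    // "1.2.3.40" sorts before "1.2.3.4:").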
                    if (s.isWholeMachine())
                        tr->clear(KeyRangeRef(addr.withSuffix(LiteralStringRef(":")),
                                              addr.withSuffix(LiteralStringRef(";"))));
                }
            }
            wait(safeThreadFutureToFuture(tr->commit()));
            return Void();
        } catch (Error& e) {
            TraceEvent("IncludeServersError").error(e, true);
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

// Include servers, specified either as IP addresses or localities, back into the cluster.
ACTOR Future<bool> include(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    state std::vector<AddressExclusion> addresses;
    state std::vector<std::string> localities;
    state bool failed = false;
    state bool all = false;
    for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
        if (*t == LiteralStringRef("all")) {
            all = true;
        } else if (*t == LiteralStringRef("failed")) {
            failed = true;
        } else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) && t->toString().find(':') != std::string::npos) {
            // the token starts with the 'locality_' prefix
            localities.push_back(t->toString());
        } else {
            auto a = AddressExclusion::parse(*t);
            if (!a.isValid()) {
                fprintf(stderr,
                        "ERROR: '%s' is neither a valid network endpoint address nor a locality\n",
                        t->toString().c_str());
                if (t->toString().find(":tls") != std::string::npos)
                    printf(" Do not include the `:tls' suffix when naming a process\n");
                return false;
            }
            addresses.push_back(a);
        }
    }
    if (all) {
        std::vector<AddressExclusion> includeAll;
        includeAll.push_back(AddressExclusion());
        wait(includeServers(db, includeAll, failed));
        wait(includeLocalities(db, localities, failed, all));
    } else {
        if (!addresses.empty()) {
            wait(includeServers(db, addresses, failed));
        }
        if (!localities.empty()) {
            // include the servers that belong to the given localities
            wait(includeLocalities(db, localities, failed, all));
        }
    }
    return true;
}

} // namespace

namespace fdb_cli {

ACTOR Future<bool> includeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    if (tokens.size() < 2) {
        printUsage(tokens[0]);
        return false;
    } else {
        bool result = wait(include(db, tokens));
        return result;
    }
}

CommandFactory includeFactory(
    "include",
    CommandHelp(
        "include all|[<ADDRESS...>] [locality_dcid:<excludedcid>] [locality_zoneid:<excludezoneid>] "
        "[locality_machineid:<excludemachineid>] [locality_processid:<excludeprocessid>] or any locality data",
        "permit previously-excluded servers and localities to rejoin the database",
        "If `all' is specified, the excluded servers and localities list is cleared.\n\nFor each IP address or IP:port "
        "pair in <ADDRESS...> or any LocalityData (like dcid, zoneid, machineid, processid), removes any "
        "matching exclusions from the excluded servers and localities list. "
        "(A specified IP will match all IP:* exclusion entries)"));
} // namespace fdb_cli

@ -0,0 +1,107 @@
/*
 * KillCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
                                    Reference<ITransaction> tr,
                                    std::vector<StringRef> tokens,
                                    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    ASSERT(tokens.size() >= 1);
    state bool result = true;
    if (tokens.size() == 1) {
        // initialize worker interfaces
        wait(getWorkerInterfaces(tr, address_interface));
    }
    if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
        if (address_interface->size() == 0) {
            printf("\nNo addresses can be killed.\n");
        } else if (address_interface->size() == 1) {
            printf("\nThe following address can be killed:\n");
        } else {
            printf("\nThe following %zu addresses can be killed:\n", address_interface->size());
        }
        for (auto it : *address_interface) {
            printf("%s\n", printable(it.first).c_str());
        }
        printf("\n");
    } else if (tokencmp(tokens[1], "all")) {
        state std::map<Key, std::pair<Value, ClientLeaderRegInterface>>::const_iterator it;
        for (it = address_interface->cbegin(); it != address_interface->cend(); it++) {
            int64_t killRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(it->first, false, 0)));
            if (!killRequestSent) {
                result = false;
                fprintf(stderr, "ERROR: failed to send request to kill process `%s'.\n", it->first.toString().c_str());
            }
        }
        if (address_interface->size() == 0) {
            result = false;
            fprintf(stderr,
                    "ERROR: no processes to kill. You must run the `kill' command before "
                    "running `kill all'.\n");
        } else {
            printf("Attempted to kill %zu processes\n", address_interface->size());
        }
    } else {
        state int i;
        for (i = 1; i < tokens.size(); i++) {
            if (!address_interface->count(tokens[i])) {
                fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
                result = false;
                break;
            }
        }

        if (result) {
            for (i = 1; i < tokens.size(); i++) {
                int64_t killRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], false, 0)));
                if (!killRequestSent) {
                    result = false;
                    fprintf(
                        stderr, "ERROR: failed to send request to kill process `%s'.\n", tokens[i].toString().c_str());
                }
            }
            printf("Attempted to kill %zu processes\n", tokens.size() - 1);
        }
    }
    return result;
}

CommandFactory killFactory(
    "kill",
    CommandHelp(
        "kill all|list|<ADDRESS...>",
        "attempts to kill one or more processes in the cluster",
        "If no addresses are specified, populates the list of processes which can be killed. Processes cannot be "
        "killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known "
        "processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is "
        "unresponsive.\n\nFor each IP:port pair in <ADDRESS ...>, attempt to kill the specified process."));
} // namespace fdb_cli
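In practice the command is used in two steps, since the address list must exist before a kill can be routed; the addresses below are hypothetical:

//   fdb> kill                      # populate the list of known processes
//   fdb> kill list                 # display them
//   fdb> kill 10.0.0.1:4500 10.0.0.2:4500
//   Attempted to kill 2 processes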

@ -0,0 +1,120 @@
/*
 * LockCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/Schemas.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

ACTOR Future<bool> lockDatabase(Reference<IDatabase> db, UID id) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            tr->set(fdb_cli::lockSpecialKey, id.toString());
            wait(safeThreadFutureToFuture(tr->commit()));
            printf("Database locked.\n");
            return true;
        } catch (Error& e) {
            state Error err(e);
            if (e.code() == error_code_database_locked)
                throw e;
            else if (e.code() == error_code_special_keys_api_failure) {
                std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
                fprintf(stderr, "%s\n", errorMsgStr.c_str());
                return false;
            }
            wait(safeThreadFutureToFuture(tr->onError(err)));
        }
    }
}

} // namespace

namespace fdb_cli {

const KeyRef lockSpecialKey = LiteralStringRef("\xff\xff/management/db_locked");

ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    if (tokens.size() != 1) {
        printUsage(tokens[0]);
        return false;
    } else {
        state UID lockUID = deterministicRandom()->randomUniqueID();
        printf("Locking database with lockUID: %s\n", lockUID.toString().c_str());
        bool result = wait(lockDatabase(db, lockUID));
        return result;
    }
}

ACTOR Future<bool> unlockDatabaseActor(Reference<IDatabase> db, UID uid) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            state ThreadFuture<Optional<Value>> valF = tr->get(fdb_cli::lockSpecialKey);
            Optional<Value> val = wait(safeThreadFutureToFuture(valF));

            if (!val.present())
                return true;

            if (val.present() && UID::fromString(val.get().toString()) != uid) {
                printf("Unable to unlock database. Make sure to unlock with the correct lock UID.\n");
                return false;
            }

            tr->clear(fdb_cli::lockSpecialKey);
            wait(safeThreadFutureToFuture(tr->commit()));
            printf("Database unlocked.\n");
            return true;
        } catch (Error& e) {
            state Error err(e);
            if (e.code() == error_code_special_keys_api_failure) {
                std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
                fprintf(stderr, "%s\n", errorMsgStr.c_str());
                return false;
            }
            wait(safeThreadFutureToFuture(tr->onError(err)));
        }
    }
}

CommandFactory lockFactory(
    "lock",
    CommandHelp("lock",
                "lock the database with a randomly generated lockUID",
                "Randomly generates a lockUID, prints this lockUID, and then uses the lockUID to lock the database."));

CommandFactory unlockFactory(
    "unlock",
    CommandHelp("unlock <UID>",
                "unlock the database with the provided lockUID",
                "Unlocks the database with the provided lockUID. This is a potentially dangerous operation, so the "
                "user will be asked to enter a passphrase to confirm their intent."));
} // namespace fdb_cli
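A typical session, with a made-up UID (lockUID values are random 128-bit IDs rendered as 32 hex characters; the interactive passphrase confirmation for unlock is handled by the caller and omitted here):

//   fdb> lock
//   Locking database with lockUID: 2d1e6e9fb2c44bbd8f4e2a0d9bcd3a5e
//   Database locked.
//   fdb> unlock 2d1e6e9fb2c44bbd8f4e2a0d9bcd3a5e
//   Database unlocked.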

@ -64,43 +64,17 @@ ACTOR Future<Void> printHealthyZone(Reference<IDatabase> db) {
     }
 }
 
-// clear ongoing maintenance, let clearSSFailureZoneString = true to enable data distribution for storage
-ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,
-                                    bool printWarning = false,
-                                    bool clearSSFailureZoneString = false) {
-    state Reference<ITransaction> tr = db->createTransaction();
-    TraceEvent("ClearHealthyZone").detail("ClearSSFailureZoneString", clearSSFailureZoneString);
-    loop {
-        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
-        try {
-            // hold the returned standalone object's memory
-            state ThreadFuture<RangeResult> resultFuture =
-                tr->getRange(fdb_cli::maintenanceSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
-            RangeResult res = wait(safeThreadFutureToFuture(resultFuture));
-            ASSERT(res.size() <= 1);
-            if (!clearSSFailureZoneString && res.size() == 1 && res[0].key == fdb_cli::ignoreSSFailureSpecialKey) {
-                if (printWarning) {
-                    fprintf(stderr,
-                            "ERROR: Maintenance mode cannot be used while data distribution is disabled for storage "
-                            "server failures. Use 'datadistribution on' to reenable data distribution.\n");
-                }
-                return false;
-            }
-
-            tr->clear(fdb_cli::maintenanceSpecialKeyRange);
-            wait(safeThreadFutureToFuture(tr->commit()));
-            return true;
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
+} // namespace
+
+namespace fdb_cli {
+
+const KeyRangeRef maintenanceSpecialKeyRange = KeyRangeRef(LiteralStringRef("\xff\xff/management/maintenance/"),
+                                                           LiteralStringRef("\xff\xff/management/maintenance0"));
+// The special key, if present, means data distribution is disabled for storage failures;
+const KeyRef ignoreSSFailureSpecialKey = LiteralStringRef("\xff\xff/management/maintenance/IgnoreSSFailures");
 
 // add a zone to maintenance and specify the maintenance duration
-ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db,
-                                  StringRef zoneId,
-                                  double seconds,
-                                  bool printWarning = false) {
+ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning) {
     state Reference<ITransaction> tr = db->createTransaction();
     TraceEvent("SetHealthyZone").detail("Zone", zoneId).detail("DurationSeconds", seconds);
     loop {

@ -129,14 +103,35 @@ ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db,
     }
 }
 
-} // namespace
-
-namespace fdb_cli {
-
-const KeyRangeRef maintenanceSpecialKeyRange = KeyRangeRef(LiteralStringRef("\xff\xff/management/maintenance/"),
-                                                           LiteralStringRef("\xff\xff/management/maintenance0"));
-// The special key, if present, means data distribution is disabled for storage failures;
-const KeyRef ignoreSSFailureSpecialKey = LiteralStringRef("\xff\xff/management/maintenance/IgnoreSSFailures");
+// clear ongoing maintenance, let clearSSFailureZoneString = true to enable data distribution for storage
+ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db, bool printWarning, bool clearSSFailureZoneString) {
+    state Reference<ITransaction> tr = db->createTransaction();
+    TraceEvent("ClearHealthyZone").detail("ClearSSFailureZoneString", clearSSFailureZoneString);
+    loop {
+        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
+        try {
+            // hold the returned standalone object's memory
+            state ThreadFuture<RangeResult> resultFuture =
+                tr->getRange(fdb_cli::maintenanceSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
+            RangeResult res = wait(safeThreadFutureToFuture(resultFuture));
+            ASSERT(res.size() <= 1);
+            if (!clearSSFailureZoneString && res.size() == 1 && res[0].key == fdb_cli::ignoreSSFailureSpecialKey) {
+                if (printWarning) {
+                    fprintf(stderr,
+                            "ERROR: Maintenance mode cannot be used while data distribution is disabled for storage "
+                            "server failures. Use 'datadistribution on' to reenable data distribution.\n");
+                }
+                return false;
+            }
+
+            tr->clear(fdb_cli::maintenanceSpecialKeyRange);
+            wait(safeThreadFutureToFuture(tr->commit()));
+            return true;
+        } catch (Error& e) {
+            wait(safeThreadFutureToFuture(tr->onError(e)));
+        }
+    }
+}
 
 ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
     state bool result = true;
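The maintenance commands drive everything through the special key range declared in this hunk; the mapping below is inferred from the code above, and the exact value format is not shown in this diff:

//   maintenance on <zoneid> <seconds>  -> write \xff\xff/management/maintenance/<zoneid> (setHealthyZone)
//   maintenance off                    -> clear the whole maintenanceSpecialKeyRange (clearHealthyZone)
// Disabling data distribution for storage server failures writes the IgnoreSSFailures
// key, which clearHealthyZone refuses to clear unless clearSSFailureZoneString is set.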

@ -0,0 +1,140 @@
/*
 * ProfileCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "boost/lexical_cast.hpp"

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/GlobalConfig.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/Tuple.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens, bool intrans) {
    state bool result = true;
    if (tokens.size() == 1) {
        printUsage(tokens[0]);
        result = false;
    } else if (tokencmp(tokens[1], "client")) {
        if (tokens.size() == 2) {
            fprintf(stderr, "ERROR: Usage: profile client <get|set>\n");
            return false;
        }
        wait(GlobalConfig::globalConfig().onInitialized());
        if (tokencmp(tokens[2], "get")) {
            if (tokens.size() != 3) {
                fprintf(stderr, "ERROR: Additional arguments to `get` are not supported.\n");
                return false;
            }
            std::string sampleRateStr = "default";
            std::string sizeLimitStr = "default";
            const double sampleRateDbl = GlobalConfig::globalConfig().get<double>(
                fdbClientInfoTxnSampleRate, std::numeric_limits<double>::infinity());
            if (!std::isinf(sampleRateDbl)) {
                sampleRateStr = std::to_string(sampleRateDbl);
            }
            const int64_t sizeLimit = GlobalConfig::globalConfig().get<int64_t>(fdbClientInfoTxnSizeLimit, -1);
            if (sizeLimit != -1) {
                sizeLimitStr = boost::lexical_cast<std::string>(sizeLimit);
            }
            printf("Client profiling rate is set to %s and size limit is set to %s.\n",
                   sampleRateStr.c_str(),
                   sizeLimitStr.c_str());
        } else if (tokencmp(tokens[2], "set")) {
            if (tokens.size() != 5) {
                fprintf(stderr, "ERROR: Usage: profile client set <RATE|default> <SIZE|default>\n");
                return false;
            }
            double sampleRate;
            if (tokencmp(tokens[3], "default")) {
                sampleRate = std::numeric_limits<double>::infinity();
            } else {
                char* end;
                sampleRate = std::strtod((const char*)tokens[3].begin(), &end);
                if (!std::isspace(*end)) {
                    fprintf(stderr, "ERROR: %s failed to parse.\n", printable(tokens[3]).c_str());
                    return false;
                }
            }
            int64_t sizeLimit;
            if (tokencmp(tokens[4], "default")) {
                sizeLimit = -1;
            } else {
                Optional<uint64_t> parsed = parse_with_suffix(tokens[4].toString());
                if (parsed.present()) {
                    sizeLimit = parsed.get();
                } else {
                    fprintf(stderr, "ERROR: `%s` failed to parse.\n", printable(tokens[4]).c_str());
                    return false;
                }
            }

            Tuple rate = Tuple().appendDouble(sampleRate);
            Tuple size = Tuple().append(sizeLimit);
            tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
            tr->set(GlobalConfig::prefixedKey(fdbClientInfoTxnSampleRate), rate.pack());
            tr->set(GlobalConfig::prefixedKey(fdbClientInfoTxnSizeLimit), size.pack());
            if (!intrans) {
                wait(safeThreadFutureToFuture(tr->commit()));
            }
        } else {
            fprintf(stderr, "ERROR: Unknown action: %s\n", printable(tokens[2]).c_str());
            result = false;
        }
    } else if (tokencmp(tokens[1], "list")) {
        if (tokens.size() != 2) {
            fprintf(stderr, "ERROR: Usage: profile list\n");
            return false;
        }
        // Hold the reference to the standalone's memory
        state ThreadFuture<RangeResult> kvsFuture =
            tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
                                     LiteralStringRef("\xff\xff/worker_interfaces0")),
                         CLIENT_KNOBS->TOO_MANY);
        RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
        ASSERT(!kvs.more);
        for (const auto& pair : kvs) {
            auto ip_port =
                (pair.key.endsWith(LiteralStringRef(":tls")) ? pair.key.removeSuffix(LiteralStringRef(":tls"))
                                                             : pair.key)
                    .removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
            printf("%s\n", printable(ip_port).c_str());
        }
    } else {
        fprintf(stderr, "ERROR: Unknown type: %s\n", printable(tokens[1]).c_str());
        result = false;
    }
    return result;
}

CommandFactory profileFactory("profile",
                              CommandHelp("profile <client|list> <action> <ARGS>",
                                          "namespace for all the profiling-related commands.",
                                          "Different types support different actions. Run `profile` to get a list of "
                                          "types, and iteratively explore the help.\n"));
} // namespace fdb_cli
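For example, with hypothetical values (the output line follows the printf above; sizes accept suffixes via parse_with_suffix):

//   fdb> profile client set 0.01 100KB
//   fdb> profile client get
//   Client profiling rate is set to 0.010000 and size limit is set to 102400.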

@ -71,15 +71,22 @@ ACTOR Future<Void> printProcessClass(Reference<IDatabase> db) {
 };
 
 ACTOR Future<bool> setProcessClass(Reference<IDatabase> db, KeyRef network_address, KeyRef class_type) {
     state Reference<ITransaction> tr = db->createTransaction();
     loop {
         tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
         try {
             tr->set(network_address.withPrefix(fdb_cli::processClassTypeSpecialKeyRange.begin), class_type);
             wait(safeThreadFutureToFuture(tr->commit()));
             return true;
         } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
+            state Error err(e);
+            if (e.code() == error_code_special_keys_api_failure) {
+                std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
+                // error message already has \n at the end
+                fprintf(stderr, "%s", errorMsgStr.c_str());
+                return false;
+            }
+            wait(safeThreadFutureToFuture(tr->onError(err)));
         }
     }
 }

@ -103,8 +110,8 @@ ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<Str
     } else if (tokens.size() == 1) {
         wait(printProcessClass(db));
     } else {
         bool successful = wait(setProcessClass(db, tokens[1], tokens[2]));
         return successful;
     }
     return true;
 }
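The state Error copy this hunk introduces is load-bearing: getSpecialKeysFailureErrorMessage() itself contains a wait(), and in flow actors ordinary locals (including the caught e) do not survive a wait, so the error must be promoted to a state variable before it can be used afterwards. A sketch of the pattern, which also appears in LockCommand.actor.cpp above:

// Inside an ACTOR's catch block:
//   state Error err(e);                 // copy 'e' into actor state first
//   std::string msg = wait(...);        // any wait() invalidates plain locals
//   wait(safeThreadFutureToFuture(tr->onError(err)));  // use the state copy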

@ -60,5 +60,5 @@ ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<Str
 }
 
 // hidden commands, no help text for now
-CommandFactory dataDistributionFactory("snapshot");
+CommandFactory snapshotFactory("snapshot");
 } // namespace fdb_cli

File diff suppressed because it is too large
|
@ -0,0 +1,102 @@
|
||||||
|
/*
|
||||||
|
* SuspendCommand.actor.cpp
|
||||||
|
*
|
||||||
|
* This source file is part of the FoundationDB open source project
|
||||||
|
*
|
||||||
|
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
                                       Reference<ITransaction> tr,
                                       std::vector<StringRef> tokens,
                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    ASSERT(tokens.size() >= 1);
    state bool result = true;
    if (tokens.size() == 1) {
        // initialize worker interfaces
        wait(getWorkerInterfaces(tr, address_interface));
        if (address_interface->size() == 0) {
            printf("\nNo addresses can be suspended.\n");
        } else if (address_interface->size() == 1) {
            printf("\nThe following address can be suspended:\n");
        } else {
            printf("\nThe following %zu addresses can be suspended:\n", address_interface->size());
        }
        for (auto it : *address_interface) {
            printf("%s\n", printable(it.first).c_str());
        }
        printf("\n");
    } else if (tokens.size() == 2) {
        printUsage(tokens[0]);
        result = false;
    } else {
        for (int i = 2; i < tokens.size(); i++) {
            if (!address_interface->count(tokens[i])) {
                fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
                result = false;
                break;
            }
        }

        if (result) {
            state double seconds;
            int n = 0;
            state int i;
            auto secondsStr = tokens[1].toString();
            if (sscanf(secondsStr.c_str(), "%lf%n", &seconds, &n) != 1 || n != secondsStr.size()) {
                printUsage(tokens[0]);
                result = false;
            } else {
                int64_t timeout_ms = seconds * 1000;
                tr->setOption(FDBTransactionOptions::TIMEOUT, StringRef((uint8_t*)&timeout_ms, sizeof(int64_t)));
                for (i = 2; i < tokens.size(); i++) {
                    int64_t suspendRequestSent =
                        wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], false, static_cast<int>(seconds))));
                    if (!suspendRequestSent) {
                        result = false;
                        fprintf(stderr,
                                "ERROR: failed to send request to suspend process `%s'.\n",
                                tokens[i].toString().c_str());
                    }
                }
                printf("Attempted to suspend %zu processes\n", tokens.size() - 2);
            }
        }
    }
    return result;
}

CommandFactory suspendFactory(
    "suspend",
    CommandHelp(
        "suspend <SECONDS> <ADDRESS...>",
        "attempts to suspend one or more processes in the cluster",
        "If no parameters are specified, populates the list of processes which can be suspended. Processes cannot be "
        "suspended before this list has been populated.\n\nFor each IP:port pair in <ADDRESS...>, attempt to suspend "
        "the processes for the specified SECONDS after which the process will die."));
} // namespace fdb_cli
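
For orientation, a hypothetical fdbcli session exercising this command might look like the following (the address and timing are made up for illustration; actual output depends on the cluster):

    fdb> suspend
    The following address can be suspended:
    10.0.0.1:4500

    fdb> suspend 10 10.0.0.1:4500
    Attempted to suspend 1 processes

The first invocation populates the address-to-interface map via getWorkerInterfaces; the second sets a transaction timeout of SECONDS * 1000 ms and sends one rebootWorker request per address.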

@@ -21,345 +21,16 @@
#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/IClientApi.h"
-#include "fdbclient/TagThrottle.h"
+#include "fdbclient/TagThrottle.actor.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/CommitTransaction.h"

#include "flow/Arena.h"
-#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

-namespace {
-
-// Helper functions copied from TagThrottle.actor.cpp
-// The only difference is transactions are changed to go through MultiversionTransaction,
-// instead of the native Transaction(i.e., RYWTransaction)
-
-ACTOR Future<bool> getValidAutoEnabled(Reference<ITransaction> tr) {
-    state bool result;
-    loop {
-        Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
-        if (!value.present()) {
-            tr->reset();
-            wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
-            continue;
-        } else if (value.get() == LiteralStringRef("1")) {
-            result = true;
-        } else if (value.get() == LiteralStringRef("0")) {
-            result = false;
-        } else {
-            TraceEvent(SevWarnAlways, "InvalidAutoTagThrottlingValue").detail("Value", value.get());
-            tr->reset();
-            wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
-            continue;
-        }
-        return result;
-    };
-}
-
-ACTOR Future<std::vector<TagThrottleInfo>> getThrottledTags(Reference<IDatabase> db,
-                                                            int limit,
-                                                            bool containsRecommend = false) {
-    state Reference<ITransaction> tr = db->createTransaction();
-    state bool reportAuto = containsRecommend;
-    loop {
-        tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
-        try {
-            if (!containsRecommend) {
-                wait(store(reportAuto, getValidAutoEnabled(tr)));
-            }
-            state ThreadFuture<RangeResult> f = tr->getRange(
-                reportAuto ? tagThrottleKeys : KeyRangeRef(tagThrottleKeysPrefix, tagThrottleAutoKeysPrefix), limit);
-            RangeResult throttles = wait(safeThreadFutureToFuture(f));
-            std::vector<TagThrottleInfo> results;
-            for (auto throttle : throttles) {
-                results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
-                                                  TagThrottleValue::fromValue(throttle.value)));
-            }
-            return results;
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
-
-ACTOR Future<std::vector<TagThrottleInfo>> getRecommendedTags(Reference<IDatabase> db, int limit) {
-    state Reference<ITransaction> tr = db->createTransaction();
-    loop {
-        tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
-        try {
-            bool enableAuto = wait(getValidAutoEnabled(tr));
-            if (enableAuto) {
-                return std::vector<TagThrottleInfo>();
-            }
-            state ThreadFuture<RangeResult> f =
-                tr->getRange(KeyRangeRef(tagThrottleAutoKeysPrefix, tagThrottleKeys.end), limit);
-            RangeResult throttles = wait(safeThreadFutureToFuture(f));
-            std::vector<TagThrottleInfo> results;
-            for (auto throttle : throttles) {
-                results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
-                                                  TagThrottleValue::fromValue(throttle.value)));
-            }
-            return results;
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
-
-ACTOR Future<Void> updateThrottleCount(Reference<ITransaction> tr, int64_t delta) {
-    state ThreadFuture<Optional<Value>> countVal = tr->get(tagThrottleCountKey);
-    state ThreadFuture<Optional<Value>> limitVal = tr->get(tagThrottleLimitKey);
-
-    wait(success(safeThreadFutureToFuture(countVal)) && success(safeThreadFutureToFuture(limitVal)));
-
-    int64_t count = 0;
-    int64_t limit = 0;
-
-    if (countVal.get().present()) {
-        BinaryReader reader(countVal.get().get(), Unversioned());
-        reader >> count;
-    }
-
-    if (limitVal.get().present()) {
-        BinaryReader reader(limitVal.get().get(), Unversioned());
-        reader >> limit;
-    }
-
-    count += delta;
-
-    if (count > limit) {
-        throw too_many_tag_throttles();
-    }
-
-    BinaryWriter writer(Unversioned());
-    writer << count;
-
-    tr->set(tagThrottleCountKey, writer.toValue());
-    return Void();
-}
-
-void signalThrottleChange(Reference<ITransaction> tr) {
-    tr->atomicOp(
-        tagThrottleSignalKey, LiteralStringRef("XXXXXXXXXX\x00\x00\x00\x00"), MutationRef::SetVersionstampedValue);
-}
-
-ACTOR Future<Void> throttleTags(Reference<IDatabase> db,
-                                TagSet tags,
-                                double tpsRate,
-                                double initialDuration,
-                                TagThrottleType throttleType,
-                                TransactionPriority priority,
-                                Optional<double> expirationTime = Optional<double>(),
-                                Optional<TagThrottledReason> reason = Optional<TagThrottledReason>()) {
-    state Reference<ITransaction> tr = db->createTransaction();
-    state Key key = TagThrottleKey(tags, throttleType, priority).toKey();
-
-    ASSERT(initialDuration > 0);
-
-    if (throttleType == TagThrottleType::MANUAL) {
-        reason = TagThrottledReason::MANUAL;
-    }
-    TagThrottleValue throttle(tpsRate,
-                              expirationTime.present() ? expirationTime.get() : 0,
-                              initialDuration,
-                              reason.present() ? reason.get() : TagThrottledReason::UNSET);
-    BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
-    wr << throttle;
-    state Value value = wr.toValue();
-
-    loop {
-        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
-        try {
-            if (throttleType == TagThrottleType::MANUAL) {
-                Optional<Value> oldThrottle = wait(safeThreadFutureToFuture(tr->get(key)));
-                if (!oldThrottle.present()) {
-                    wait(updateThrottleCount(tr, 1));
-                }
-            }
-
-            tr->set(key, value);
-
-            if (throttleType == TagThrottleType::MANUAL) {
-                signalThrottleChange(tr);
-            }
-
-            wait(safeThreadFutureToFuture(tr->commit()));
-            return Void();
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
-
-ACTOR Future<bool> unthrottleTags(Reference<IDatabase> db,
-                                  TagSet tags,
-                                  Optional<TagThrottleType> throttleType,
-                                  Optional<TransactionPriority> priority) {
-    state Reference<ITransaction> tr = db->createTransaction();
-
-    state std::vector<Key> keys;
-    for (auto p : allTransactionPriorities) {
-        if (!priority.present() || priority.get() == p) {
-            if (!throttleType.present() || throttleType.get() == TagThrottleType::AUTO) {
-                keys.push_back(TagThrottleKey(tags, TagThrottleType::AUTO, p).toKey());
-            }
-            if (!throttleType.present() || throttleType.get() == TagThrottleType::MANUAL) {
-                keys.push_back(TagThrottleKey(tags, TagThrottleType::MANUAL, p).toKey());
-            }
-        }
-    }
-
-    state bool removed = false;
-
-    loop {
-        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
-        try {
-            state std::vector<Future<Optional<Value>>> values;
-            values.reserve(keys.size());
-            for (auto key : keys) {
-                values.push_back(safeThreadFutureToFuture(tr->get(key)));
-            }
-
-            wait(waitForAll(values));
-
-            int delta = 0;
-            for (int i = 0; i < values.size(); ++i) {
-                if (values[i].get().present()) {
-                    if (TagThrottleKey::fromKey(keys[i]).throttleType == TagThrottleType::MANUAL) {
-                        delta -= 1;
-                    }
-
-                    tr->clear(keys[i]);
-
-                    // Report that we are removing this tag if we ever see it present.
-                    // This protects us from getting confused if the transaction is maybe committed.
-                    // It's ok if someone else actually ends up removing this tag at the same time
-                    // and we aren't the ones to actually do it.
-                    removed = true;
-                }
-            }
-
-            if (delta != 0) {
-                wait(updateThrottleCount(tr, delta));
-            }
-            if (removed) {
-                signalThrottleChange(tr);
-                wait(safeThreadFutureToFuture(tr->commit()));
-            }
-
-            return removed;
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
-
-ACTOR Future<Void> enableAuto(Reference<IDatabase> db, bool enabled) {
-    state Reference<ITransaction> tr = db->createTransaction();
-
-    loop {
-        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
-        try {
-            Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
-            if (!value.present() || (enabled && value.get() != LiteralStringRef("1")) ||
-                (!enabled && value.get() != LiteralStringRef("0"))) {
-                tr->set(tagThrottleAutoEnabledKey, LiteralStringRef(enabled ? "1" : "0"));
-                signalThrottleChange(tr);
-
-                wait(safeThreadFutureToFuture(tr->commit()));
-            }
-            return Void();
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
-
-ACTOR Future<bool> unthrottleMatchingThrottles(Reference<IDatabase> db,
-                                               KeyRef beginKey,
-                                               KeyRef endKey,
-                                               Optional<TransactionPriority> priority,
-                                               bool onlyExpiredThrottles) {
-    state Reference<ITransaction> tr = db->createTransaction();
-
-    state KeySelector begin = firstGreaterOrEqual(beginKey);
-    state KeySelector end = firstGreaterOrEqual(endKey);
-
-    state bool removed = false;
-
-    loop {
-        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
-        try {
-            // holds memory of the RangeResult
-            state ThreadFuture<RangeResult> f = tr->getRange(begin, end, 1000);
-            state RangeResult tags = wait(safeThreadFutureToFuture(f));
-            state uint64_t unthrottledTags = 0;
-            uint64_t manualUnthrottledTags = 0;
-            for (auto tag : tags) {
-                if (onlyExpiredThrottles) {
-                    double expirationTime = TagThrottleValue::fromValue(tag.value).expirationTime;
-                    if (expirationTime == 0 || expirationTime > now()) {
-                        continue;
-                    }
-                }
-
-                TagThrottleKey key = TagThrottleKey::fromKey(tag.key);
-                if (priority.present() && key.priority != priority.get()) {
-                    continue;
-                }
-
-                if (key.throttleType == TagThrottleType::MANUAL) {
-                    ++manualUnthrottledTags;
-                }
-
-                removed = true;
-                tr->clear(tag.key);
-                unthrottledTags++;
-            }
-
-            if (manualUnthrottledTags > 0) {
-                wait(updateThrottleCount(tr, -manualUnthrottledTags));
-            }
-
-            if (unthrottledTags > 0) {
-                signalThrottleChange(tr);
-            }
-
-            wait(safeThreadFutureToFuture(tr->commit()));
-
-            if (!tags.more) {
-                return removed;
-            }
-
-            ASSERT(tags.size() > 0);
-            begin = KeySelector(firstGreaterThan(tags[tags.size() - 1].key), tags.arena());
-        } catch (Error& e) {
-            wait(safeThreadFutureToFuture(tr->onError(e)));
-        }
-    }
-}
-
-Future<bool> unthrottleAll(Reference<IDatabase> db,
-                           Optional<TagThrottleType> tagThrottleType,
-                           Optional<TransactionPriority> priority) {
-    KeyRef begin = tagThrottleKeys.begin;
-    KeyRef end = tagThrottleKeys.end;
-
-    if (tagThrottleType.present() && tagThrottleType == TagThrottleType::AUTO) {
-        begin = tagThrottleAutoKeysPrefix;
-    } else if (tagThrottleType.present() && tagThrottleType == TagThrottleType::MANUAL) {
-        end = tagThrottleAutoKeysPrefix;
-    }
-
-    return unthrottleMatchingThrottles(db, begin, end, priority, false);
-}
-
-} // namespace
-
namespace fdb_cli {

ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
@@ -403,11 +74,11 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
    state std::vector<TagThrottleInfo> tags;
    if (reportThrottled && reportRecommended) {
-        wait(store(tags, getThrottledTags(db, throttleListLimit, true)));
+        wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit, true)));
    } else if (reportThrottled) {
-        wait(store(tags, getThrottledTags(db, throttleListLimit)));
+        wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit)));
    } else if (reportRecommended) {
-        wait(store(tags, getRecommendedTags(db, throttleListLimit)));
+        wait(store(tags, ThrottleApi::getRecommendedTags(db, throttleListLimit)));
    }

    bool anyLogged = false;
@@ -509,7 +180,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
        TagSet tags;
        tags.addTag(tokens[3]);

-        wait(throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
+        wait(ThrottleApi::throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
        printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
    } else if (tokencmp(tokens[1], "off")) {
        int nextIndex = 2;
@@ -586,7 +257,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
            priority.present() ? format(" at %s priority", transactionPriorityToString(priority.get(), false)) : "";

        if (tags.size() > 0) {
-            bool success = wait(unthrottleTags(db, tags, throttleType, priority));
+            bool success = wait(ThrottleApi::unthrottleTags(db, tags, throttleType, priority));
            if (success) {
                printf("Unthrottled tag `%s'%s\n", tokens[3].toString().c_str(), priorityString.c_str());
            } else {
@@ -596,7 +267,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
                   priorityString.c_str());
        }
    } else {
-        bool unthrottled = wait(unthrottleAll(db, throttleType, priority));
+        bool unthrottled = wait(ThrottleApi::unthrottleAll(db, throttleType, priority));
        if (unthrottled) {
            printf("Unthrottled all %sthrottled tags%s\n", throttleTypeString, priorityString.c_str());
        } else {
@@ -626,7 +297,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
        return false;
    }
    state bool autoTagThrottlingEnabled = tokencmp(tokens[1], "enable");
-    wait(enableAuto(db, autoTagThrottlingEnabled));
+    wait(ThrottleApi::enableAuto(db, autoTagThrottlingEnabled));
    printf("Automatic tag throttling has been %s\n", autoTagThrottlingEnabled ? "enabled" : "disabled");
} else {
    printUsage(tokens[0]);
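
The only behavioral change in this file is that the locally copied helpers above are deleted and the calls now go through the shared ThrottleApi namespace from TagThrottle.actor.h. Based on the token handling visible in these hunks (tokens[1] selects the subcommand and tokens[3] names the tag), a sketch of the corresponding fdbcli usage might be (tag name and output lines are illustrative assumptions built from the printf calls shown):

    fdb> throttle on tag myTag
    Tag `myTag' has been throttled

    fdb> throttle off tag myTag
    Unthrottled tag `myTag'

    fdb> throttle enable auto
    Automatic tag throttling has been enabled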
@@ -0,0 +1,57 @@
/*
 * TriggerDDTeamInfoLogCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/SystemData.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> triggerddteaminfologCommandActor(Reference<IDatabase> db) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            std::string v = deterministicRandom()->randomUniqueID().toString();
            tr->set(triggerDDTeamInfoPrintKey, v);
            wait(safeThreadFutureToFuture(tr->commit()));
            printf("Triggered team info logging in data distribution.\n");
            return true;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

CommandFactory triggerddteaminfologFactory(
    "triggerddteaminfolog",
    CommandHelp("triggerddteaminfolog",
                "trigger the data distributor teams logging",
                "Trigger the data distributor to log detailed information about its teams."));

} // namespace fdb_cli
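
The command writes a fresh randomUniqueID() value each time, presumably so the write is never identical to the previous one and any watch the data distributor holds on triggerDDTeamInfoPrintKey always fires. A minimal session (output copied from the printf above):

    fdb> triggerddteaminfolog
    Triggered team info logging in data distribution.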
@@ -0,0 +1,147 @@
/*
 * TssqCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/KeyBackedTypes.h"
#include "fdbclient/SystemData.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

ACTOR Future<Void> tssQuarantineList(Reference<IDatabase> db) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            // Hold the reference to the standalone's memory
            state ThreadFuture<RangeResult> resultFuture = tr->getRange(tssQuarantineKeys, CLIENT_KNOBS->TOO_MANY);
            RangeResult result = wait(safeThreadFutureToFuture(resultFuture));
            // shouldn't have many quarantined TSSes
            ASSERT(!result.more);
            printf("Found %d quarantined TSS processes%s\n", result.size(), result.size() == 0 ? "." : ":");
            for (auto& it : result) {
                printf("  %s\n", decodeTssQuarantineKey(it.key).toString().c_str());
            }
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<bool> tssQuarantine(Reference<IDatabase> db, bool enable, UID tssId) {
    state Reference<ITransaction> tr = db->createTransaction();
    state KeyBackedMap<UID, UID> tssMapDB = KeyBackedMap<UID, UID>(tssMappingKeys.begin);

    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

            // Do some validation first to make sure the command is valid
            // hold the returned standalone object's memory
            state ThreadFuture<Optional<Value>> serverListValueF = tr->get(serverListKeyFor(tssId));
            Optional<Value> serverListValue = wait(safeThreadFutureToFuture(serverListValueF));
            if (!serverListValue.present()) {
                printf("No TSS %s found in cluster!\n", tssId.toString().c_str());
                return false;
            }
            state StorageServerInterface ssi = decodeServerListValue(serverListValue.get());
            if (!ssi.isTss()) {
                printf("Cannot quarantine non-TSS storage ID %s!\n", tssId.toString().c_str());
                return false;
            }

            // hold the returned standalone object's memory
            state ThreadFuture<Optional<Value>> currentQuarantineValueF = tr->get(tssQuarantineKeyFor(tssId));
            Optional<Value> currentQuarantineValue = wait(safeThreadFutureToFuture(currentQuarantineValueF));
            if (enable && currentQuarantineValue.present()) {
                printf("TSS %s is already in quarantine, doing nothing.\n", tssId.toString().c_str());
                return false;
            } else if (!enable && !currentQuarantineValue.present()) {
                printf("TSS %s is not in quarantine, cannot remove it from quarantine.\n", tssId.toString().c_str());
                return false;
            }

            if (enable) {
                tr->set(tssQuarantineKeyFor(tssId), LiteralStringRef(""));
                // remove server from TSS mapping when quarantine is enabled
                tssMapDB.erase(tr, ssi.tssPairID.get());
            } else {
                tr->clear(tssQuarantineKeyFor(tssId));
            }

            wait(safeThreadFutureToFuture(tr->commit()));
            break;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
    printf("Successfully %s TSS %s\n", enable ? "quarantined" : "removed", tssId.toString().c_str());
    return true;
}

} // namespace

namespace fdb_cli {

ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    if (tokens.size() == 2) {
        if (tokens[1] != LiteralStringRef("list")) {
            printUsage(tokens[0]);
            return false;
        } else {
            wait(tssQuarantineList(db));
        }
    } else if (tokens.size() == 3) {
        if ((tokens[1] != LiteralStringRef("start") && tokens[1] != LiteralStringRef("stop")) ||
            (tokens[2].size() != 32) || !std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
            printUsage(tokens[0]);
            return false;
        } else {
            bool enable = tokens[1] == LiteralStringRef("start");
            UID tssId = UID::fromString(tokens[2].toString());
            bool success = wait(tssQuarantine(db, enable, tssId));
            return success;
        }
    } else {
        printUsage(tokens[0]);
        return false;
    }
    return true;
}

CommandFactory tssqFactory(
    "tssq",
    CommandHelp("tssq start|stop <StorageUID>",
                "start/stop tss quarantine",
                "Toggles quarantine mode for a Testing Storage Server. Quarantine will happen automatically if the "
                "TSS is detected to have incorrect data, but can also be initiated manually. You can also remove a "
                "TSS from quarantine once your investigation is finished, which will destroy the TSS process."));

} // namespace fdb_cli
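
A sketch of the resulting CLI surface, using a made-up 32-hex-digit storage UID (the messages mirror the printf calls above):

    fdb> tssq start 0123456789abcdef0123456789abcdef
    Successfully quarantined TSS 0123456789abcdef0123456789abcdef

    fdb> tssq list
    Found 1 quarantined TSS processes:
      0123456789abcdef0123456789abcdef

Note that tssqCommandActor validates the UID argument as exactly 32 hex digits before calling UID::fromString.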
@@ -0,0 +1,163 @@
/*
 * Util.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/Schemas.h"
#include "fdbclient/Status.h"

#include "flow/Arena.h"

#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

bool tokencmp(StringRef token, const char* command) {
    if (token.size() != strlen(command))
        return false;

    return !memcmp(token.begin(), command, token.size());
}

void printUsage(StringRef command) {
    const auto& helpMap = CommandFactory::commands();
    auto i = helpMap.find(command.toString());
    if (i != helpMap.end())
        printf("Usage: %s\n", i->second.usage.c_str());
    else
        fprintf(stderr, "ERROR: Unknown command `%s'\n", command.toString().c_str());
}

ACTOR Future<std::string> getSpecialKeysFailureErrorMessage(Reference<ITransaction> tr) {
    // hold the returned standalone object's memory
    state ThreadFuture<Optional<Value>> errorMsgF = tr->get(fdb_cli::errorMsgSpecialKey);
    Optional<Value> errorMsg = wait(safeThreadFutureToFuture(errorMsgF));
    // Error message should be present
    ASSERT(errorMsg.present());
    // Read the json string
    auto valueObj = readJSONStrictly(errorMsg.get().toString()).get_obj();
    // verify schema
    auto schema = readJSONStrictly(JSONSchemas::managementApiErrorSchema.toString()).get_obj();
    std::string errorStr;
    ASSERT(schemaMatch(schema, valueObj, errorStr, SevError, true));
    // return the error message
    return valueObj["message"].get_str();
}

ACTOR Future<Void> verifyAndAddInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                         Reference<FlowLock> connectLock,
                                         KeyValue kv) {
    wait(connectLock->take());
    state FlowLock::Releaser releaser(*connectLock);
    state ClientWorkerInterface workerInterf;
    try {
        // the interface is backward compatible, so if parsing fails, the CLI version needs to be upgraded
        workerInterf = BinaryReader::fromStringRef<ClientWorkerInterface>(kv.value, IncludeVersion());
    } catch (Error& e) {
        fprintf(stderr, "Error: %s; CLI version is too old, please update to use a newer version\n", e.what());
        return Void();
    }
    state ClientLeaderRegInterface leaderInterf(workerInterf.address());
    choose {
        when(Optional<LeaderInfo> rep =
                 wait(brokenPromiseToNever(leaderInterf.getLeader.getReply(GetLeaderRequest())))) {
            StringRef ip_port =
                (kv.key.endsWith(LiteralStringRef(":tls")) ? kv.key.removeSuffix(LiteralStringRef(":tls")) : kv.key)
                    .removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
            (*address_interface)[ip_port] = std::make_pair(kv.value, leaderInterf);

            if (workerInterf.reboot.getEndpoint().addresses.secondaryAddress.present()) {
                Key full_ip_port2 =
                    StringRef(workerInterf.reboot.getEndpoint().addresses.secondaryAddress.get().toString());
                StringRef ip_port2 = full_ip_port2.endsWith(LiteralStringRef(":tls"))
                                         ? full_ip_port2.removeSuffix(LiteralStringRef(":tls"))
                                         : full_ip_port2;
                (*address_interface)[ip_port2] = std::make_pair(kv.value, leaderInterf);
            }
        }
        when(wait(delay(CLIENT_KNOBS->CLI_CONNECT_TIMEOUT))) {}
    }
    return Void();
}

ACTOR Future<Void> getWorkerInterfaces(Reference<ITransaction> tr,
                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    // Hold the reference to the standalone's memory
    state ThreadFuture<RangeResult> kvsFuture = tr->getRange(
        KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")),
        CLIENT_KNOBS->TOO_MANY);
    RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
    ASSERT(!kvs.more);
    auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
    std::vector<Future<Void>> addInterfs;
    for (auto it : kvs) {
        addInterfs.push_back(verifyAndAddInterface(address_interface, connectLock, it));
    }
    wait(waitForAll(addInterfs));
    return Void();
}

ACTOR Future<bool> getWorkers(Reference<IDatabase> db, std::vector<ProcessData>* workers) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        try {
            tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            tr->setOption(FDBTransactionOptions::LOCK_AWARE);
            state ThreadFuture<RangeResult> processClasses = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY);
            state ThreadFuture<RangeResult> processData = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY);

            wait(success(safeThreadFutureToFuture(processClasses)) && success(safeThreadFutureToFuture(processData)));
            ASSERT(!processClasses.get().more && processClasses.get().size() < CLIENT_KNOBS->TOO_MANY);
            ASSERT(!processData.get().more && processData.get().size() < CLIENT_KNOBS->TOO_MANY);

            state std::map<Optional<Standalone<StringRef>>, ProcessClass> id_class;
            state int i;
            for (i = 0; i < processClasses.get().size(); i++) {
                try {
                    id_class[decodeProcessClassKey(processClasses.get()[i].key)] =
                        decodeProcessClassValue(processClasses.get()[i].value);
                } catch (Error& e) {
                    fprintf(stderr, "Error: %s; Client version is too old, please use a newer version\n", e.what());
                    return false;
                }
            }

            for (i = 0; i < processData.get().size(); i++) {
                ProcessData data = decodeWorkerListValue(processData.get()[i].value);
                ProcessClass processClass = id_class[data.locality.processId()];

                if (processClass.classSource() == ProcessClass::DBSource ||
                    data.processClass.classType() == ProcessClass::UnsetClass)
                    data.processClass = processClass;

                if (data.processClass.classType() != ProcessClass::TesterClass)
                    workers->push_back(data);
            }

            return true;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

} // namespace fdb_cli
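
As a small illustration of how the command actors above consume these helpers, a typical dispatch inside an actor looks roughly like this (a sketch only; tokens comes from the CLI tokenizer):

    if (tokencmp(tokens[1], "list")) {
        // handle the subcommand
    } else {
        printUsage(tokens[0]); // prints the usage string registered via CommandFactory/CommandHelp
    }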
File diff suppressed because it is too large.
@@ -28,7 +28,11 @@
#elif !defined(FDBCLI_FDBCLI_ACTOR_H)
#define FDBCLI_FDBCLI_ACTOR_H

+#include "fdbcli/FlowLineNoise.h"
+
+#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/IClientApi.h"
+#include "fdbclient/StatusClient.h"
#include "flow/Arena.h"

#include "flow/actorcompiler.h" // This must be the last #include.
@@ -61,34 +65,134 @@ struct CommandFactory {
extern const KeyRef advanceVersionSpecialKey;
// consistencycheck
extern const KeyRef consistencyCheckSpecialKey;
+// coordinators
+extern const KeyRef clusterDescriptionSpecialKey;
+extern const KeyRef coordinatorsAutoSpecialKey;
+extern const KeyRef coordinatorsProcessSpecialKey;
+// datadistribution
+extern const KeyRef ddModeSpecialKey;
+extern const KeyRef ddIgnoreRebalanceSpecialKey;
+// exclude/include
+extern const KeyRangeRef excludedServersSpecialKeyRange;
+extern const KeyRangeRef failedServersSpecialKeyRange;
+extern const KeyRangeRef excludedLocalitySpecialKeyRange;
+extern const KeyRangeRef failedLocalitySpecialKeyRange;
+extern const KeyRef excludedForceOptionSpecialKey;
+extern const KeyRef failedForceOptionSpecialKey;
+extern const KeyRef excludedLocalityForceOptionSpecialKey;
+extern const KeyRef failedLocalityForceOptionSpecialKey;
+extern const KeyRangeRef exclusionInProgressSpecialKeyRange;
+// lock/unlock
+extern const KeyRef lockSpecialKey;
// maintenance
extern const KeyRangeRef maintenanceSpecialKeyRange;
extern const KeyRef ignoreSSFailureSpecialKey;
// setclass
extern const KeyRangeRef processClassSourceSpecialKeyRange;
extern const KeyRangeRef processClassTypeSpecialKeyRange;
+// Other special keys
+inline const KeyRef errorMsgSpecialKey = LiteralStringRef("\xff\xff/error_message");
// help functions (Copied from fdbcli.actor.cpp)
+// decode worker interfaces
+ACTOR Future<Void> addInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
+                                Reference<FlowLock> connectLock,
+                                KeyValue kv);
+// get all workers' info
+ACTOR Future<bool> getWorkers(Reference<IDatabase> db, std::vector<ProcessData>* workers);
+
// compare StringRef with the given c string
bool tokencmp(StringRef token, const char* command);
// print the usage of the specified command
void printUsage(StringRef command);
+// Pre: tr failed with special_keys_api_failure error
+// Read the error message special key and return the message
+ACTOR Future<std::string> getSpecialKeysFailureErrorMessage(Reference<ITransaction> tr);
+// Using \xff\xff/worker_interfaces/ special key, get all worker interfaces
+ACTOR Future<Void> getWorkerInterfaces(Reference<ITransaction> tr,
+                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
+// Deserialize \xff\xff/worker_interfaces/<address>:=<ClientInterface> k-v pair and verify by a RPC call
+ACTOR Future<Void> verifyAndAddInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
+                                         Reference<FlowLock> connectLock,
+                                         KeyValue kv);
+// print cluster status info
+void printStatus(StatusObjectReader statusObj,
+                 StatusClient::StatusLevel level,
+                 bool displayDatabaseAvailable = true,
+                 bool hideErrorMessages = false);
+
// All fdbcli commands (alphabetically)
+// All below actors return true if the command is executed successfully
// advanceversion command
ACTOR Future<bool> advanceVersionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// cache_range command
+ACTOR Future<bool> cacheRangeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// configure command
+ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
+                                         Database localDb,
+                                         std::vector<StringRef> tokens,
+                                         LineNoise* linenoise,
+                                         Future<Void> warn);
// consistency command
-ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens);
+ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
+                                                std::vector<StringRef> tokens,
+                                                bool intrans);
+// coordinators command
+ACTOR Future<bool> coordinatorsCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// datadistribution command
+ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// exclude command
+ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens, Future<Void> warn);
+// expensive_data_check command
+ACTOR Future<bool> expensiveDataCheckCommandActor(
+    Reference<IDatabase> db,
+    Reference<ITransaction> tr,
+    std::vector<StringRef> tokens,
+    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
+// fileconfigure command
+ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
+                                             std::string filePath,
+                                             bool isNewDatabase,
+                                             bool force);
// force_recovery_with_data_loss command
ACTOR Future<bool> forceRecoveryWithDataLossCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// include command
+ACTOR Future<bool> includeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// kill command
+ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
+                                    Reference<ITransaction> tr,
+                                    std::vector<StringRef> tokens,
+                                    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
+// lock/unlock command
+ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+ACTOR Future<bool> unlockDatabaseActor(Reference<IDatabase> db, UID uid);
// maintenance command
+ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning = false);
+ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,
+                                    bool printWarning = false,
+                                    bool clearSSFailureZoneString = false);
ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// profile command
+ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens, bool intrans);
// setclass command
ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// snapshot command
ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// status command
+ACTOR Future<bool> statusCommandActor(Reference<IDatabase> db,
+                                      Database localDb,
+                                      std::vector<StringRef> tokens,
+                                      bool isExecMode = false);
+// suspend command
+ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
+                                       Reference<ITransaction> tr,
+                                       std::vector<StringRef> tokens,
+                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// throttle command
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
+// triggerteaminfolog command
+ACTOR Future<bool> triggerddteaminfologCommandActor(Reference<IDatabase> db);
+// tssq command
+ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);

} // namespace fdb_cli
@@ -239,11 +239,14 @@ std::vector<std::shared_ptr<Sample>> SampleCollection_t::get(double from /*= 0.0
}

void sample(LineageReference* lineagePtr) {
-    if (!lineagePtr->isValid()) { return; }
+    if (!lineagePtr->isValid()) {
+        return;
+    }
    (*lineagePtr)->modify(&NameLineage::actorName) = lineagePtr->actorName();
-    boost::asio::post(ActorLineageProfiler::instance().context(), [lineage = LineageReference::addRef(lineagePtr->getPtr())]() {
-        SampleCollection::instance().collect(lineage);
-    });
+    boost::asio::post(ActorLineageProfiler::instance().context(),
+                      [lineage = LineageReference::addRef(lineagePtr->getPtr())]() {
+                          SampleCollection::instance().collect(lineage);
+                      });
}

struct ProfilerImpl {
@@ -32,7 +32,11 @@ public:
    bool isTerminate() const override { return true; }
};

-ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, std::atomic<int> *sum, int count, int clientId, double meanSleep) {
+ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread,
+                                         std::atomic<int>* sum,
+                                         int count,
+                                         int clientId,
+                                         double meanSleep) {
    state int i = 0;
    state double randomSleep = 0.0;
    for (; i < count; ++i) {
@@ -43,11 +47,11 @@ ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, std::
        }));
        TraceEvent("AsyncTaskThreadIncrementedSum")
            .detail("Index", i)
            .detail("Sum", sum->load())
            .detail("ClientId", clientId)
            .detail("RandomSleep", randomSleep)
            .detail("MeanSleep", meanSleep);
    }
    return Void();
}
@@ -93,7 +97,8 @@ TEST_CASE("/asynctaskthread/add") {
    std::vector<Future<Void>> clients;
    clients.reserve(numClients);
    for (int clientId = 0; clientId < numClients; ++clientId) {
-        clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, incrementsPerClient, clientId, deterministicRandom()->random01() * 0.01));
+        clients.push_back(asyncTaskThreadClient(
+            &asyncTaskThread, &sum, incrementsPerClient, clientId, deterministicRandom()->random01() * 0.01));
    }
    wait(waitForAll(clients));
    ASSERT_EQ(sum.load(), numClients * incrementsPerClient);
@@ -103,12 +108,12 @@ TEST_CASE("/asynctaskthread/add") {
TEST_CASE("/asynctaskthread/error") {
    state AsyncTaskThread asyncTaskThread;
    try {
-        wait(asyncTaskThread.execAsync([]{
+        wait(asyncTaskThread.execAsync([] {
            throw operation_failed();
            return Void();
        }));
        ASSERT(false);
-    } catch (Error &e) {
+    } catch (Error& e) {
        ASSERT_EQ(e.code(), error_code_operation_failed);
    }
    return Void();
@@ -533,6 +533,7 @@ Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
                          CheckBackupUID = CheckBackupUID::False,
                          Version backupUid = 0);
Key getApplyKey(Version version, Key backupUid);
+Key getLogKey(Version version, Key backupUid);
Version getLogKeyVersion(Key key);
std::pair<Version, uint32_t> decodeBKMutationLogKey(Key key);
Future<Void> logError(Database cx, Key keyErrors, const std::string& message);
@@ -229,6 +229,16 @@ Key getApplyKey(Version version, Key backupUid) {
    return k2.withPrefix(applyLogKeys.begin);
}

+Key getLogKey(Version version, Key backupUid) {
+    int64_t vblock = (version - 1) / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
+    uint64_t v = bigEndian64(version);
+    uint32_t data = vblock & 0xffffffff;
+    uint8_t hash = (uint8_t)hashlittle(&data, sizeof(uint32_t), 0);
+    Key k1 = StringRef((uint8_t*)&v, sizeof(uint64_t)).withPrefix(StringRef(&hash, sizeof(uint8_t)));
+    Key k2 = k1.withPrefix(backupUid);
+    return k2.withPrefix(backupLogKeys.begin);
+}
+
Version getLogKeyVersion(Key key) {
    return bigEndian64(*(int64_t*)(key.begin() + backupLogPrefixBytes + sizeof(UID) + sizeof(uint8_t)));
}
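
Reading getLogKey together with getLogKeyVersion, the backup mutation-log key appears to be laid out as follows (a sketch assuming backupLogPrefixBytes is the length of the backupLogKeys.begin prefix and backupUid is a serialized UID):

    // key := backupLogKeys.begin | backupUid | uint8_t hash(vblock) | big-endian uint64_t version
    // getLogKeyVersion() recovers the version from the fixed offset past the prefix, UID, and hash byte:
    //   bigEndian64(*(int64_t*)(key.begin() + backupLogPrefixBytes + sizeof(UID) + sizeof(uint8_t)))

The hash byte is computed from the version's block number, which spreads adjacent blocks of log keys across the keyspace.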
@@ -298,8 +298,8 @@ public:
    static std::vector<std::string> getURLFormats();
    static Future<std::vector<std::string>> listContainers(const std::string& baseURL);

-    std::string const &getURL() const { return URL; }
-    Optional<std::string> const &getEncryptionKeyFileName() const { return encryptionKeyFileName; }
+    std::string const& getURL() const { return URL; }
+    Optional<std::string> const& getEncryptionKeyFileName() const { return encryptionKeyFileName; }

    static std::string lastOpenError;
@@ -283,7 +283,6 @@ public:
        }
        return Void();
    }
-
};

Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileName) {
@@ -70,6 +70,8 @@ set(FDBCLIENT_SRCS
    MultiVersionTransaction.actor.cpp
    MultiVersionTransaction.h
    MutationList.h
+    MutationLogReader.actor.cpp
+    MutationLogReader.actor.h
    NameLineage.h
    NameLineage.cpp
    NativeAPI.actor.cpp
@@ -113,7 +115,7 @@ set(FDBCLIENT_SRCS
    SystemData.cpp
    SystemData.h
    TagThrottle.actor.cpp
-    TagThrottle.h
+    TagThrottle.actor.h
    TaskBucket.actor.cpp
    TaskBucket.h
    TestKnobCollection.cpp
@@ -125,6 +127,7 @@ set(FDBCLIENT_SRCS
    VersionedMap.actor.h
    VersionedMap.h
    VersionedMap.cpp
+    WellKnownEndpoints.h
    WriteMap.h
    json_spirit/json_spirit_error_position.h
    json_spirit/json_spirit_reader_template.h
@@ -180,16 +183,19 @@ if(BUILD_AZURE_BACKUP)
endif()

add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
-add_dependencies(fdbclient fdboptions fdb_c_options)
+add_dependencies(fdbclient fdboptions)
target_link_libraries(fdbclient PUBLIC fdbrpc msgpack)

# Create a separate fdbclient library with sampling enabled. This lets
# fdbserver retain sampling functionality in client code while disabling
# sampling for pure clients.
add_flow_target(STATIC_LIBRARY NAME fdbclient_sampling SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
-add_dependencies(fdbclient_sampling fdboptions fdb_c_options)
+add_dependencies(fdbclient_sampling fdboptions)
target_link_libraries(fdbclient_sampling PUBLIC fdbrpc_sampling msgpack)
target_compile_definitions(fdbclient_sampling PRIVATE -DENABLE_SAMPLING)
+if(WIN32)
+  add_dependencies(fdbclient_sampling_actors fdbclient_actors)
+endif()

if(BUILD_AZURE_BACKUP)
    target_link_libraries(fdbclient PRIVATE curl uuid azure-storage-lite)
@@ -49,3 +49,7 @@ Optional<KnobValue> ClientKnobCollection::tryParseKnobValue(std::string const& k
bool ClientKnobCollection::trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) {
    return knobValue.visitSetKnob(knobName, flowKnobs) || knobValue.visitSetKnob(knobName, clientKnobs);
}
+
+bool ClientKnobCollection::isAtomic(std::string const& knobName) const {
+    return flowKnobs.isAtomic(knobName) || clientKnobs.isAtomic(knobName);
+}
@@ -43,4 +43,5 @@ public:
    TestKnobs const& getTestKnobs() const override { throw internal_error(); }
    Optional<KnobValue> tryParseKnobValue(std::string const& knobName, std::string const& knobValue) const override;
    bool trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) override;
+    bool isAtomic(std::string const& knobName) const override;
};
@ -23,7 +23,7 @@
|
||||||
#include "fdbclient/SystemData.h"
|
#include "fdbclient/SystemData.h"
|
||||||
#include "flow/UnitTest.h"
|
#include "flow/UnitTest.h"
|
||||||
|
|
||||||
#define init(knob, value) initKnob(knob, value, #knob)
|
#define init(...) KNOB_FN(__VA_ARGS__, INIT_ATOMIC_KNOB, INIT_KNOB)(__VA_ARGS__)
|
||||||
|
|
||||||
ClientKnobs::ClientKnobs(Randomize randomize) {
|
ClientKnobs::ClientKnobs(Randomize randomize) {
|
||||||
initialize(randomize);
|
initialize(randomize);
|
||||||
|
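The new init macro overloads on argument count: a two-argument call expands through INIT_KNOB exactly as before, while a three-argument call routes through INIT_ATOMIC_KNOB so a knob can be flagged atomic at its init site. KNOB_FN itself is not shown in this diff; the sketch below is a minimal reconstruction of the usual argument-counting trick, with dummy initKnob overloads standing in for the real ones (it relies on the common compiler extension that tolerates an empty __VA_ARGS__ tail).

    #include <cstdio>

    // Assumed reconstruction: KNOB_FN yields its 4th parameter, which is
    // INIT_ATOMIC_KNOB for three-argument calls and INIT_KNOB for two.
    #define KNOB_FN(_1, _2, _3, FN, ...) FN
    #define INIT_KNOB(knob, value) initKnob(knob, value, #knob)
    #define INIT_ATOMIC_KNOB(knob, value, atomic) initKnob(knob, value, #knob, atomic)
    #define init(...) KNOB_FN(__VA_ARGS__, INIT_ATOMIC_KNOB, INIT_KNOB)(__VA_ARGS__)

    enum class Atomic { NO, YES };
    static void initKnob(double& knob, double value, const char* name) {
        knob = value;
        std::printf("%s registered as non-atomic\n", name);
    }
    static void initKnob(double& knob, double value, const char* name, Atomic) {
        knob = value;
        std::printf("%s registered as atomic\n", name);
    }

    int main() {
        double TAG_THROTTLE_RECHECK_INTERVAL, SOME_KNOB;
        init(TAG_THROTTLE_RECHECK_INTERVAL, 5.0); // two args -> INIT_KNOB
        init(SOME_KNOB, 1.0, Atomic::YES);        // three args -> INIT_ATOMIC_KNOB
    }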
@@ -247,6 +247,10 @@ void ClientKnobs::initialize(Randomize randomize) {
 	init( TAG_THROTTLE_RECHECK_INTERVAL, 5.0 ); if( randomize && BUGGIFY ) TAG_THROTTLE_RECHECK_INTERVAL = 0.0;
 	init( TAG_THROTTLE_EXPIRATION_INTERVAL, 60.0 ); if( randomize && BUGGIFY ) TAG_THROTTLE_EXPIRATION_INTERVAL = 1.0;
 
+	// busyness reporting
+	init( BUSYNESS_SPIKE_START_THRESHOLD, 0.100 );
+	init( BUSYNESS_SPIKE_SATURATED_THRESHOLD, 0.500 );
+
 	// clang-format on
 }
 
@@ -238,6 +238,10 @@ public:
 	double TAG_THROTTLE_RECHECK_INTERVAL;
 	double TAG_THROTTLE_EXPIRATION_INTERVAL;
 
+	// busyness reporting
+	double BUSYNESS_SPIKE_START_THRESHOLD;
+	double BUSYNESS_SPIKE_SATURATED_THRESHOLD;
+
 	ClientKnobs(Randomize randomize);
 	void initialize(Randomize randomize);
 };
@@ -267,7 +267,7 @@ struct StatusRequest {
 
 struct GetClientWorkersRequest {
 	constexpr static FileIdentifier file_identifier = 10771791;
-	ReplyPromise<vector<ClientWorkerInterface>> reply;
+	ReplyPromise<std::vector<ClientWorkerInterface>> reply;
 
 	GetClientWorkersRequest() {}
 
@@ -28,7 +28,7 @@
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/StorageServerInterface.h"
 #include "fdbclient/CommitTransaction.h"
-#include "fdbclient/TagThrottle.h"
+#include "fdbclient/TagThrottle.actor.h"
 #include "fdbclient/GlobalConfig.h"
 
 #include "fdbrpc/Stats.h"
@@ -109,12 +109,12 @@ struct CommitProxyInterface {
 struct ClientDBInfo {
 	constexpr static FileIdentifier file_identifier = 5355080;
 	UID id; // Changes each time anything else changes
-	vector<GrvProxyInterface> grvProxies;
-	vector<CommitProxyInterface> commitProxies;
+	std::vector<GrvProxyInterface> grvProxies;
+	std::vector<CommitProxyInterface> commitProxies;
 	Optional<CommitProxyInterface>
 	    firstCommitProxy; // not serialized, used for commitOnFirstProxy when the commit proxies vector has been shrunk
 	Optional<Value> forward;
-	vector<VersionHistory> history;
+	std::vector<VersionHistory> history;
 
 	ClientDBInfo() {}
 
@@ -285,7 +285,7 @@ struct GetReadVersionRequest : TimedRequest {
 struct GetKeyServerLocationsReply {
 	constexpr static FileIdentifier file_identifier = 10636023;
 	Arena arena;
-	std::vector<std::pair<KeyRangeRef, vector<StorageServerInterface>>> results;
+	std::vector<std::pair<KeyRangeRef, std::vector<StorageServerInterface>>> results;
 
 	// if any storage servers in results have a TSS pair, that mapping is in here
 	std::vector<std::pair<UID, StorageServerInterface>> resultsTssMapping;
@@ -499,11 +499,11 @@ struct ExclusionSafetyCheckReply {
 
 struct ExclusionSafetyCheckRequest {
 	constexpr static FileIdentifier file_identifier = 13852702;
-	vector<AddressExclusion> exclusions;
+	std::vector<AddressExclusion> exclusions;
 	ReplyPromise<ExclusionSafetyCheckReply> reply;
 
 	ExclusionSafetyCheckRequest() {}
-	explicit ExclusionSafetyCheckRequest(vector<AddressExclusion> exclusions) : exclusions(exclusions) {}
+	explicit ExclusionSafetyCheckRequest(std::vector<AddressExclusion> exclusions) : exclusions(exclusions) {}
 
 	template <class Ar>
 	void serialize(Ar& ar) {
@@ -34,10 +34,11 @@ void ConfigTransactionInterface::setupWellKnownEndpoints() {
 }
 
 ConfigTransactionInterface::ConfigTransactionInterface(NetworkAddress const& remote)
-  : getGeneration(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETGENERATION)),
-    get(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GET)), getClasses(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETCLASSES)),
-    getKnobs(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETKNOBS)), commit(Endpoint({ remote }, WLTOKEN_CONFIGTXN_COMMIT)) {
-}
+  : getGeneration(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GETGENERATION)),
+    get(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GET)),
+    getClasses(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GETCLASSES)),
+    getKnobs(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GETKNOBS)),
+    commit(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_COMMIT)) {}
 
 bool ConfigTransactionInterface::operator==(ConfigTransactionInterface const& rhs) const {
 	return _id == rhs._id;
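Endpoint::wellKnown makes explicit that these five RequestStreams bind to reserved tokens agreed on by both sides ahead of time (the WLTOKEN_CONFIGTXN_* constants, which this commit also moves into fdbclient/WellKnownEndpoints.h), rather than to dynamically registered endpoints. A self-contained sketch of the idea with stand-in types; the real Endpoint lives in fdbrpc and its members may differ.

    #include <cstdint>
    #include <vector>

    // Stand-ins for flow types, just to keep this sketch self-contained.
    struct NetworkAddress { uint32_t ip = 0; uint16_t port = 0; };
    struct UID { int64_t first = 0; int64_t second = 0; };

    // Assumed shape: a well-known endpoint pairs an address list with a token
    // fixed at compile time, so no registration round-trip has to precede the
    // first request a client sends.
    struct Endpoint {
        std::vector<NetworkAddress> addresses;
        UID token;
        static Endpoint wellKnown(std::vector<NetworkAddress> const& addresses, UID token) {
            return Endpoint{ addresses, token };
        }
    };

    int main() {
        const UID WLTOKEN_CONFIGTXN_GET{ -1, 13 }; // reserved index, per this diff
        Endpoint e = Endpoint::wellKnown({ NetworkAddress{ 0x7f000001, 4500 } }, WLTOKEN_CONFIGTXN_GET);
        return e.addresses.size() == 1 ? 0 : 1;
    }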
@@ -70,22 +71,3 @@ bool ConfigGeneration::operator>(ConfigGeneration const& rhs) const {
 	return liveVersion > rhs.liveVersion;
 }
 }
-
-void ConfigTransactionCommitRequest::set(KeyRef key, ValueRef value) {
-	if (key == configTransactionDescriptionKey) {
-		annotation.description = KeyRef(arena, value);
-	} else {
-		ConfigKey configKey = ConfigKeyRef::decodeKey(key);
-		auto knobValue = IKnobCollection::parseKnobValue(
-		    configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
-		mutations.emplace_back_deep(arena, configKey, knobValue.contents());
-	}
-}
-
-void ConfigTransactionCommitRequest::clear(KeyRef key) {
-	if (key == configTransactionDescriptionKey) {
-		annotation.description = ""_sr;
-	} else {
-		mutations.emplace_back_deep(arena, ConfigKeyRef::decodeKey(key), Optional<KnobValueRef>{});
-	}
-}
@@ -108,10 +108,13 @@ struct ConfigTransactionCommitRequest {
 	ConfigCommitAnnotationRef annotation;
 	ReplyPromise<Void> reply;
 
-	size_t expectedSize() const { return mutations.expectedSize() + annotation.expectedSize(); }
-	void set(KeyRef key, ValueRef value);
-	void clear(KeyRef key);
+	ConfigTransactionCommitRequest() = default;
+	explicit ConfigTransactionCommitRequest(ConfigGeneration generation,
+	                                        VectorRef<ConfigMutationRef> mutations,
+	                                        ConfigCommitAnnotationRef annotation)
+	  : generation(generation), mutations(arena, mutations), annotation(arena, annotation) {}
+
+	size_t expectedSize() const { return mutations.expectedSize() + annotation.expectedSize(); }
 
 	template <class Ar>
 	void serialize(Ar& ar) {
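With set() and clear() gone from ConfigTransactionCommitRequest, knob parsing moves behind IKnobCollection::createSetMutation and createClearMutation, which appear later in this diff with essentially the same bodies. A hedged sketch of what a caller might now look like; addKnobMutation is a hypothetical helper, not code from this commit, and it compiles only inside the FDB tree.

    // Hypothetical helper: route a knob write through the new static factories
    // instead of the removed ConfigTransactionCommitRequest::set/clear.
    void addKnobMutation(ConfigTransactionCommitRequest& req, KeyRef key, Optional<ValueRef> value) {
        if (value.present()) {
            // createSetMutation parses the knob value (see the IKnobCollection hunk below)
            req.mutations.push_back(req.arena, IKnobCollection::createSetMutation(req.arena, key, value.get()));
        } else {
            req.mutations.push_back(req.arena, IKnobCollection::createClearMutation(req.arena, key));
        }
    }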
@@ -27,23 +27,10 @@
 #include "fdbrpc/Locality.h"
 #include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/ClusterInterface.h"
+#include "fdbclient/WellKnownEndpoints.h"
 
 const int MAX_CLUSTER_FILE_BYTES = 60000;
 
-// well known endpoints published to the client.
-constexpr UID WLTOKEN_CLIENTLEADERREG_GETLEADER(-1, 2);
-constexpr UID WLTOKEN_CLIENTLEADERREG_OPENDATABASE(-1, 3);
-
-// the value of this endpoint should be stable and not change.
-constexpr UID WLTOKEN_PROTOCOL_INFO(-1, 10);
-constexpr UID WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE(-1, 11);
-
-constexpr UID WLTOKEN_CONFIGTXN_GETGENERATION(-1, 12);
-constexpr UID WLTOKEN_CONFIGTXN_GET(-1, 13);
-constexpr UID WLTOKEN_CONFIGTXN_GETCLASSES(-1, 14);
-constexpr UID WLTOKEN_CONFIGTXN_GETKNOBS(-1, 15);
-constexpr UID WLTOKEN_CONFIGTXN_COMMIT(-1, 16);
-
 struct ClientLeaderRegInterface {
 	RequestStream<struct GetLeaderRequest> getLeader;
 	RequestStream<struct OpenDatabaseCoordRequest> openDatabase;
@@ -62,8 +49,8 @@ class ClusterConnectionString {
 public:
 	ClusterConnectionString() {}
 	ClusterConnectionString(std::string const& connectionString);
-	ClusterConnectionString(vector<NetworkAddress>, Key);
-	vector<NetworkAddress> const& coordinators() const { return coord; }
+	ClusterConnectionString(std::vector<NetworkAddress>, Key);
+	std::vector<NetworkAddress> const& coordinators() const { return coord; }
 	Key clusterKey() const { return key; }
 	Key clusterKeyName() const {
 		return keyDesc;
@@ -74,7 +61,7 @@ public:
 private:
 	void parseKey(std::string const& key);
 
-	vector<NetworkAddress> coord;
+	std::vector<NetworkAddress> coord;
 	Key key, keyDesc;
 };
 
@@ -199,7 +186,7 @@ struct OpenDatabaseCoordRequest {
 	Standalone<VectorRef<ClientVersionRef>> supportedVersions;
 	UID knownClientInfoID;
 	Key clusterKey;
-	vector<NetworkAddress> coordinators;
+	std::vector<NetworkAddress> coordinators;
 	ReplyPromise<CachedSerialization<struct ClientDBInfo>> reply;
 
 	template <class Ar>
@@ -210,7 +197,7 @@ struct OpenDatabaseCoordRequest {
 
 class ClientCoordinators {
 public:
-	vector<ClientLeaderRegInterface> clientLeaderServers;
+	std::vector<ClientLeaderRegInterface> clientLeaderServers;
 	Key clusterKey;
 	Reference<ClusterConnectionFile> ccf;
 
@@ -3096,7 +3096,7 @@ public:
 	state Future<Optional<Key>> fBackupKeysPacked =
 	    tr->get(backupAgent->config.get(BinaryWriter::toValue(logUid, Unversioned()))
 	                .pack(BackupAgentBase::keyConfigBackupRanges));
 	state Future<Optional<Value>> flogVersionKey =
 	    tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned()))
 	                .pack(BackupAgentBase::keyStateLogBeginVersion));
 
@@ -3115,13 +3115,11 @@ public:
 
 	state Optional<Value> stopVersionKey = wait(fStopVersionKey);
 	Optional<Value> logVersionKey = wait(flogVersionKey);
-	state std::string logVersionText
-	    = ". Last log version is "
-	    + (
-	    logVersionKey.present()
-	    ? format("%lld", BinaryReader::fromStringRef<Version>(logVersionKey.get(), Unversioned()))
-	    : "unset"
-	    );
+	state std::string logVersionText =
+	    ". Last log version is " +
+	    (logVersionKey.present()
+	         ? format("%lld", BinaryReader::fromStringRef<Version>(logVersionKey.get(), Unversioned()))
+	         : "unset");
 	Optional<Key> backupKeysPacked = wait(fBackupKeysPacked);
 
 	state Standalone<VectorRef<KeyRangeRef>> backupRanges;
@@ -3140,8 +3138,8 @@ public:
 			    "The DR on tag `" + tagNameDisplay + "' is NOT a complete copy of the primary database.\n";
 			break;
 		case EBackupState::STATE_RUNNING_DIFFERENTIAL:
-			statusText +=
-			    "The DR on tag `" + tagNameDisplay + "' is a complete copy of the primary database" + logVersionText + ".\n";
+			statusText += "The DR on tag `" + tagNameDisplay +
+			              "' is a complete copy of the primary database" + logVersionText + ".\n";
 			break;
 		case EBackupState::STATE_COMPLETED: {
 			Version stopVersion =
@@ -45,6 +45,8 @@ void DatabaseConfiguration::resetInternal() {
 	remoteTLogReplicationFactor = repopulateRegionAntiQuorum = 0;
 	backupWorkerEnabled = false;
 	perpetualStorageWiggleSpeed = 0;
+	perpetualStorageWiggleLocality = "0";
+	storageMigrationType = StorageMigrationType::DEFAULT;
 }
 
 void parse(int* i, ValueRef const& v) {
@@ -198,7 +200,9 @@ bool DatabaseConfiguration::isValid() const {
 	      (usableRegions == 1 || regions.size() == 2) && (regions.size() == 0 || regions[0].priority >= 0) &&
 	      (regions.size() == 0 || tLogPolicy->info() != "dcid^2 x zoneid^2 x 1") &&
 	      // We cannot specify regions with three_datacenter replication
-	      (perpetualStorageWiggleSpeed == 0 || perpetualStorageWiggleSpeed == 1))) {
+	      (perpetualStorageWiggleSpeed == 0 || perpetualStorageWiggleSpeed == 1) &&
+	      isValidPerpetualStorageWiggleLocality(perpetualStorageWiggleLocality) &&
+	      storageMigrationType != StorageMigrationType::UNSET)) {
 		return false;
 	}
 	std::set<Key> dcIds;
@@ -389,6 +393,8 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
 
 	result["backup_worker_enabled"] = (int32_t)backupWorkerEnabled;
 	result["perpetual_storage_wiggle"] = perpetualStorageWiggleSpeed;
+	result["perpetual_storage_wiggle_locality"] = perpetualStorageWiggleLocality;
+	result["storage_migration_type"] = storageMigrationType.toString();
 	return result;
 }
 
@@ -542,6 +548,30 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
 		parse(&regions, value);
 	} else if (ck == LiteralStringRef("perpetual_storage_wiggle")) {
 		parse(&perpetualStorageWiggleSpeed, value);
+	} else if (ck == LiteralStringRef("perpetual_storage_wiggle_locality")) {
+		if (!isValidPerpetualStorageWiggleLocality(value.toString())) {
+			return false;
+		}
+		perpetualStorageWiggleLocality = value.toString();
+	} else if (ck == LiteralStringRef("storage_migration_type")) {
+		parse((&type), value);
+		storageMigrationType = (StorageMigrationType::MigrationType)type;
+	} else if (ck == LiteralStringRef("proxies")) {
+		int proxiesCount;
+		parse(&proxiesCount, value);
+		if (proxiesCount > 1) {
+			int derivedGrvProxyCount =
+			    std::max(1,
+			             std::min(CLIENT_KNOBS->DEFAULT_MAX_GRV_PROXIES,
+			                      proxiesCount / (CLIENT_KNOBS->DEFAULT_COMMIT_GRV_PROXIES_RATIO + 1)));
+			int derivedCommitProxyCount = proxiesCount - derivedGrvProxyCount;
+			if (grvProxyCount == -1) {
+				grvProxyCount = derivedGrvProxyCount;
+			}
+			if (commitProxyCount == -1) {
+				commitProxyCount = derivedCommitProxyCount;
+			}
+		}
 	} else {
 		return false;
 	}
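The proxies branch splits a single proxies count into GRV and commit proxies at roughly a 1:DEFAULT_COMMIT_GRV_PROXIES_RATIO ratio, capped at DEFAULT_MAX_GRV_PROXIES, and only fills in counts the operator left unset (-1). A worked example, assuming ratio 3 and cap 4 purely for illustration; the real defaults live in ClientKnobs and are not shown in this diff.

    #include <algorithm>
    #include <cassert>

    int main() {
        // Assumed knob values, for illustration only.
        const int DEFAULT_COMMIT_GRV_PROXIES_RATIO = 3;
        const int DEFAULT_MAX_GRV_PROXIES = 4;

        int proxiesCount = 9; // e.g. from `proxies=9`
        int grv = std::max(1, std::min(DEFAULT_MAX_GRV_PROXIES,
                                       proxiesCount / (DEFAULT_COMMIT_GRV_PROXIES_RATIO + 1)));
        int commit = proxiesCount - grv;
        assert(grv == 2 && commit == 7); // 9 proxies split roughly 1:3, GRV side capped at 4
    }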
@@ -245,6 +245,10 @@ struct DatabaseConfiguration {
 
 	// Perpetual Storage Setting
 	int32_t perpetualStorageWiggleSpeed;
+	std::string perpetualStorageWiggleLocality;
+
+	// Storage Migration Type
+	StorageMigrationType storageMigrationType;
 
 	// Excluded servers (no state should be here)
 	bool isExcludedServer(NetworkAddressList) const;
@@ -182,10 +182,10 @@ public:
 
 	std::pair<KeyRange, Reference<LocationInfo>> getCachedLocation(const KeyRef&, Reverse isBackward = Reverse::False);
 	bool getCachedLocations(const KeyRangeRef&,
-	                        vector<std::pair<KeyRange, Reference<LocationInfo>>>&,
+	                        std::vector<std::pair<KeyRange, Reference<LocationInfo>>>&,
 	                        int limit,
 	                        Reverse reverse);
-	Reference<LocationInfo> setCachedLocation(const KeyRangeRef&, const vector<struct StorageServerInterface>&);
+	Reference<LocationInfo> setCachedLocation(const KeyRangeRef&, const std::vector<struct StorageServerInterface>&);
 	void invalidateCache(const KeyRef&, Reverse isBackward = Reverse::False);
 	void invalidateCache(const KeyRangeRef&);
 
@@ -375,6 +375,8 @@ public:
 	Counter transactionsProcessBehind;
 	Counter transactionsThrottled;
 	Counter transactionsExpensiveClearCostEstCount;
+	Counter transactionGrvFullBatches;
+	Counter transactionGrvTimedOutBatches;
 
 	ContinuousSample<double> latencies, readLatencies, commitLatencies, GRVLatencies, mutationsPerCommit,
 	    bytesPerCommit;
@@ -437,6 +439,10 @@ public:
 	// Requests to the storage server will no longer be duplicated to its pair TSS.
 	void removeTssMapping(StorageServerInterface const& ssi);
 
+	// used in template functions to create a transaction
+	using TransactionT = ReadYourWritesTransaction;
+	Reference<TransactionT> createTransaction();
+
 private:
 	std::unordered_map<KeyRef, Reference<WatchMetadata>> watchMap;
 };
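The TransactionT alias mirrors IDatabase::TransactionT added later in this diff: template code can name the transaction type through the database type and work unchanged against either front end. A standalone illustration of the pattern with toy types; nothing here is FDB code.

    #include <memory>

    struct NativeTransaction { /* ... */ };
    struct NativeDatabase {
        using TransactionT = NativeTransaction;
        std::shared_ptr<TransactionT> createTransaction() { return std::make_shared<TransactionT>(); }
    };

    // Generic code keys off the nested alias, so the same template compiles for
    // any database type exposing TransactionT and createTransaction().
    template <class DB>
    std::shared_ptr<typename DB::TransactionT> begin(DB& db) {
        return db.createTransaction();
    }

    int main() {
        NativeDatabase db;
        auto tr = begin(db); // deduces NativeTransaction via DB::TransactionT
    }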
@@ -42,15 +42,20 @@ struct FDBOptionInfo {
 	// be no cumulative effects from calling multiple times).
 	int defaultFor;
 
+	enum class ParamType { None, String, Int, Bytes };
+
+	ParamType paramType;
+
 	FDBOptionInfo(std::string name,
 	              std::string comment,
 	              std::string parameterComment,
 	              bool hasParameter,
 	              bool hidden,
 	              bool persistent,
-	              int defaultFor)
+	              int defaultFor,
+	              ParamType paramType)
 	  : name(name), comment(comment), parameterComment(parameterComment), hasParameter(hasParameter), hidden(hidden),
-	    persistent(persistent), defaultFor(defaultFor) {}
+	    persistent(persistent), defaultFor(defaultFor), paramType(paramType) {}
 
 	FDBOptionInfo() {}
 };
@@ -103,8 +108,9 @@ public:
 	typename OptionList::const_iterator end() const { return options.cend(); }
 };
 
-#define ADD_OPTION_INFO(type, var, name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor) \
+#define ADD_OPTION_INFO( \
+    type, var, name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor, paramType) \
 	type::optionInfo.insert( \
-	    var, FDBOptionInfo(name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor));
+	    var, FDBOptionInfo(name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor, paramType));
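Every ADD_OPTION_INFO call site, mostly generated from fdb.options, now has to state the parameter's type explicitly. A hypothetical invocation for an int-valued transaction option; the option name and comment strings are illustrative, not taken from this commit.

    // Hypothetical example: registering an int-typed transaction option.
    ADD_OPTION_INFO(FDBTransactionOptions,
                    TIMEOUT,
                    "TIMEOUT",
                    "Set a timeout in milliseconds after which the transaction is cancelled.",
                    "value in milliseconds of timeout",
                    true,  // hasParameter
                    false, // hidden
                    false, // persistent
                    -1,    // defaultFor
                    FDBOptionInfo::ParamType::Int)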
@@ -864,6 +864,14 @@ struct StorageBytes {
 		             used / 1e6,
 		             temp / 1e6);
 	}
+
+	void toTraceEvent(TraceEvent& e) const {
+		e.detail("StorageBytesUsed", used)
+		    .detail("StorageBytesTemp", temp)
+		    .detail("StorageBytesTotal", total)
+		    .detail("StorageBytesFree", free)
+		    .detail("StorageBytesAvailable", available);
+	}
 };
 struct LogMessageVersion {
 	// Each message pushed into the log system has a unique, totally ordered LogMessageVersion
@@ -1126,4 +1134,47 @@ inline const char* transactionPriorityToString(TransactionPriority priority, boo
 	throw internal_error();
 }
 
+struct StorageMigrationType {
+	// These enumerated values are stored in the database configuration, so can NEVER be changed. Only add new ones
+	// just before END.
+	enum MigrationType { DEFAULT = 1, UNSET = 0, DISABLED = 1, AGGRESSIVE = 2, GRADUAL = 3, END = 4 };
+
+	StorageMigrationType() : type(UNSET) {}
+	StorageMigrationType(MigrationType type) : type(type) {
+		if ((uint32_t)type >= END) {
+			this->type = UNSET;
+		}
+	}
+	operator MigrationType() const { return MigrationType(type); }
+
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, type);
+	}
+
+	std::string toString() const {
+		switch (type) {
+		case DISABLED:
+			return "disabled";
+		case AGGRESSIVE:
+			return "aggressive";
+		case GRADUAL:
+			return "gradual";
+		case UNSET:
+			return "unset";
+		default:
+			ASSERT(false);
+		}
+		return "";
+	}
+
+	uint32_t type;
+};
+
+inline bool isValidPerpetualStorageWiggleLocality(std::string locality) {
+	int pos = locality.find(':');
+	// locality should be either 0 or in the format '<non_empty_string>:<non_empty_string>'
+	return ((pos > 0 && pos < locality.size() - 1) || locality == "0");
+}
+
 #endif
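Two details worth noting above: the enum deliberately aliases DEFAULT to DISABLED (both 1), which is why toString() needs no separate DEFAULT case, and the wiggle-locality validator accepts exactly "0" or "key:value" with both halves non-empty. A standalone check of the validator's behavior, with its one-line logic copied from the hunk above and an explicit cast added to silence sign-compare warnings.

    #include <cassert>
    #include <string>

    bool isValidLocality(const std::string& locality) {
        int pos = (int)locality.find(':');
        return ((pos > 0 && pos < (int)locality.size() - 1) || locality == "0");
    }

    int main() {
        assert(isValidLocality("0"));         // "0" disables the locality filter
        assert(isValidLocality("zoneid:z1")); // <locality_key>:<locality_value>
        assert(!isValidLocality("zoneid:")); // empty value rejected
        assert(!isValidLocality(":z1"));     // empty key rejected
        assert(!isValidLocality("zoneid"));  // missing separator rejected
    }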
@@ -2743,7 +2743,7 @@ struct StartFullBackupTaskFunc : BackupTaskFuncBase {
 			if (!backupWorkerEnabled && partitionedLog.get().present() && partitionedLog.get().get()) {
 				// Change configuration only when we set to use partitioned logs and
 				// the flag was not set before.
-				wait(success(changeConfig(cx, "backup_worker_enabled:=1", true)));
+				wait(success(ManagementAPI::changeConfig(cx.getReference(), "backup_worker_enabled:=1", true)));
 				backupWorkerEnabled = true;
 			}
 
@@ -3426,7 +3426,8 @@ struct RestoreLogDataTaskFunc : RestoreFileTaskFuncBase {
 
 		state Key mutationLogPrefix = restore.mutationLogPrefix();
 		state Reference<IAsyncFile> inFile = wait(bc->readFile(logFile.fileName));
-		state Standalone<VectorRef<KeyValueRef>> dataOriginal = wait(decodeMutationLogFileBlock(inFile, readOffset, readLen));
+		state Standalone<VectorRef<KeyValueRef>> dataOriginal =
+		    wait(decodeMutationLogFileBlock(inFile, readOffset, readLen));
 
 		// Filter the KV pairs extracted from the log file block to remove any records known to not be needed for this
 		// restore based on the restore range set.
@@ -73,17 +73,13 @@ class SampleSender : public std::enable_shared_from_this<SampleSender<Protocol,
 	}
 
 	void send(boost::asio::ip::tcp::socket& socket, std::shared_ptr<Buf> const& buf) {
-		boost::asio::async_write(
-		    socket,
-		    boost::asio::const_buffer(buf->data, buf->size),
-		    [buf, this](auto const& ec, size_t) {
-			    this->sendCompletionHandler(ec);
-		    });
+		boost::asio::async_write(socket,
+		                         boost::asio::const_buffer(buf->data, buf->size),
+		                         [buf, this](auto const& ec, size_t) { this->sendCompletionHandler(ec); });
 	}
 	void send(boost::asio::ip::udp::socket& socket, std::shared_ptr<Buf> const& buf) {
-		socket.async_send(
-		    boost::asio::const_buffer(buf->data, buf->size),
-		    [buf, this](auto const& ec, size_t) { this->sendCompletionHandler(ec); });
+		socket.async_send(boost::asio::const_buffer(buf->data, buf->size),
+		                  [buf, this](auto const& ec, size_t) { this->sendCompletionHandler(ec); });
 	}
 
 	void sendNext() {
@@ -122,18 +118,16 @@ class SampleSender : public std::enable_shared_from_this<SampleSender<Protocol,
 
 public:
 	SampleSender(Socket& socket, Callback const& callback, std::shared_ptr<Sample> const& sample)
-	  : socket(socket),
-	    callback(callback),
-	    iter(sample->data.begin()),
-	    end(sample->data.end()),
-	    sample_(sample) {
-		sendNext();
-	}
+	  : socket(socket), callback(callback), iter(sample->data.begin()), end(sample->data.end()), sample_(sample) {
+		sendNext();
+	}
 };
 
 // Sample function to make instanciation of SampleSender easier
 template <class Protocol, class Callback>
-std::shared_ptr<SampleSender<Protocol, Callback>> makeSampleSender(typename Protocol::socket& socket, Callback const& callback, std::shared_ptr<Sample> const& sample) {
+std::shared_ptr<SampleSender<Protocol, Callback>> makeSampleSender(typename Protocol::socket& socket,
+                                                                   Callback const& callback,
+                                                                   std::shared_ptr<Sample> const& sample) {
 	return std::make_shared<SampleSender<Protocol, Callback>>(socket, callback, sample);
 }
 
@@ -164,11 +158,11 @@ struct FluentDSocketImpl : FluentDSocket, std::enable_shared_from_this<FluentDSo
 		}
 	}
 
 	void sendImpl(std::shared_ptr<Sample> const& sample) {
-		makeSampleSender<Protocol>(socket, [self = this->shared_from_this()](boost::system::error_code const& ec){
-			self->sendCompletionHandler(ec);
-		}, sample);
+		makeSampleSender<Protocol>(
+		    socket,
+		    [self = this->shared_from_this()](boost::system::error_code const& ec) { self->sendCompletionHandler(ec); },
+		    sample);
 	}
 
 	void send(std::shared_ptr<Sample> const& sample) override {
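The reformatted send() overloads preserve a load-bearing idiom: the completion lambda captures buf by value, so the shared_ptr keeps the byte buffer alive until the asynchronous operation finishes. A self-contained sketch of the same idiom, assuming Boost.Asio is available; the payload and port are arbitrary, and none of this is FDB code.

    #include <boost/asio.hpp>
    #include <iostream>
    #include <memory>
    #include <vector>

    namespace asio = boost::asio;

    int main() {
        asio::io_context io;
        asio::ip::udp::socket socket(io, asio::ip::udp::v4());
        socket.connect({ asio::ip::make_address("127.0.0.1"), 8889 });

        // The lambda copies `buf`, so the data outlives this scope even though
        // the send completes asynchronously.
        auto buf = std::make_shared<std::vector<char>>(1024, 'x');
        socket.async_send(asio::const_buffer(buf->data(), buf->size()),
                          [buf](boost::system::error_code const& ec, std::size_t n) {
                              std::cout << "sent " << n << " bytes, ec=" << ec.message() << "\n";
                          });
        io.run();
    }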
@@ -88,6 +88,14 @@ public:
 
 	virtual void addref() = 0;
 	virtual void delref() = 0;
+
+	// used in template functions as returned Future type
+	template <class Type>
+	using FutureT = ThreadFuture<Type>;
+	// internal use only, return true by default
+	// Only if it's a MultiVersionTransaction and the underlying transaction handler is null,
+	// it will return false
+	virtual bool isValid() { return true; }
 };
 
 // An interface that represents a connection to a cluster made by a client
@@ -115,6 +123,9 @@ public:
 	virtual ThreadFuture<Void> forceRecoveryWithDataLoss(const StringRef& dcid) = 0;
 	// Management API, create snapshot
 	virtual ThreadFuture<Void> createSnapshot(const StringRef& uid, const StringRef& snapshot_command) = 0;
+
+	// used in template functions as the Transaction type that can be created through createTransaction()
+	using TransactionT = ITransaction;
 };
 
 // An interface that presents the top-level FDB client API as exposed through the C bindings
|
||||||
IKnobCollection& IKnobCollection::getMutableGlobalKnobCollection() {
|
IKnobCollection& IKnobCollection::getMutableGlobalKnobCollection() {
|
||||||
return *globalKnobCollection;
|
return *globalKnobCollection;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ConfigMutationRef IKnobCollection::createSetMutation(Arena arena, KeyRef key, ValueRef value) {
|
||||||
|
ConfigKey configKey = ConfigKeyRef::decodeKey(key);
|
||||||
|
auto knobValue =
|
||||||
|
IKnobCollection::parseKnobValue(configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
|
||||||
|
return ConfigMutationRef(arena, configKey, knobValue.contents());
|
||||||
|
}
|
||||||
|
|
||||||
|
ConfigMutationRef IKnobCollection::createClearMutation(Arena arena, KeyRef key) {
|
||||||
|
return ConfigMutationRef(arena, ConfigKeyRef::decodeKey(key), {});
|
||||||
|
}
|
||||||
|
|
|
@ -54,14 +54,18 @@ public:
|
||||||
virtual ClientKnobs const& getClientKnobs() const = 0;
|
virtual ClientKnobs const& getClientKnobs() const = 0;
|
||||||
virtual ServerKnobs const& getServerKnobs() const = 0;
|
virtual ServerKnobs const& getServerKnobs() const = 0;
|
||||||
virtual class TestKnobs const& getTestKnobs() const = 0;
|
virtual class TestKnobs const& getTestKnobs() const = 0;
|
||||||
|
virtual void clearTestKnobs() {}
|
||||||
virtual Optional<KnobValue> tryParseKnobValue(std::string const& knobName, std::string const& knobValue) const = 0;
|
virtual Optional<KnobValue> tryParseKnobValue(std::string const& knobName, std::string const& knobValue) const = 0;
|
||||||
KnobValue parseKnobValue(std::string const& knobName, std::string const& knobValue) const;
|
KnobValue parseKnobValue(std::string const& knobName, std::string const& knobValue) const;
|
||||||
static KnobValue parseKnobValue(std::string const& knobName, std::string const& knobValue, Type);
|
static KnobValue parseKnobValue(std::string const& knobName, std::string const& knobValue, Type);
|
||||||
// Result indicates whether or not knob was successfully set:
|
// Result indicates whether or not knob was successfully set:
|
||||||
virtual bool trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) = 0;
|
virtual bool trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) = 0;
|
||||||
void setKnob(std::string const& knobName, KnobValueRef const& knobValue);
|
void setKnob(std::string const& knobName, KnobValueRef const& knobValue);
|
||||||
|
virtual bool isAtomic(std::string const& knobName) const = 0;
|
||||||
|
|
||||||
static void setGlobalKnobCollection(Type, Randomize, IsSimulated);
|
static void setGlobalKnobCollection(Type, Randomize, IsSimulated);
|
||||||
static IKnobCollection const& getGlobalKnobCollection();
|
static IKnobCollection const& getGlobalKnobCollection();
|
||||||
static IKnobCollection& getMutableGlobalKnobCollection();
|
static IKnobCollection& getMutableGlobalKnobCollection();
|
||||||
|
static ConfigMutationRef createSetMutation(Arena, KeyRef, ValueRef);
|
||||||
|
static ConfigMutationRef createClearMutation(Arena, KeyRef);
|
||||||
};
|
};
|
||||||
|
|
|
@@ -23,6 +23,7 @@
 #include <utility>
 #include <vector>
 
+#include "fdbclient/IClientApi.h"
 #include "fdbclient/ReadYourWrites.h"
 #include "fdbclient/Subspace.h"
 #include "flow/genericactors.actor.h"
@@ -322,6 +323,10 @@ public:
 		return tr->clear(space.pack(Codec<KeyType>::pack(key)));
 	}
 
+	void erase(Reference<ITransaction> tr, KeyType const& key) {
+		return tr->clear(space.pack(Codec<KeyType>::pack(key)));
+	}
+
 	void erase(Reference<ReadYourWritesTransaction> tr, KeyType const& begin, KeyType const& end) {
 		return tr->clear(KeyRangeRef(space.pack(Codec<KeyType>::pack(begin)), space.pack(Codec<KeyType>::pack(end))));
 	}
@@ -25,7 +25,7 @@
 #include "fdbclient/ReadYourWrites.h"
 #include "flow/actorcompiler.h" // has to be last include
 
-void KeyRangeActorMap::getRangesAffectedByInsertion(const KeyRangeRef& keys, vector<KeyRange>& affectedRanges) {
+void KeyRangeActorMap::getRangesAffectedByInsertion(const KeyRangeRef& keys, std::vector<KeyRange>& affectedRanges) {
 	auto s = map.rangeContaining(keys.begin);
 	if (s.begin() != keys.begin && s.value().isValid() && !s.value().isReady())
 		affectedRanges.push_back(KeyRangeRef(s.begin(), keys.begin));
@@ -176,7 +176,7 @@ static Future<Void> krmSetRangeCoalescing_(Transaction* tr,
 	state KeyRange maxWithPrefix =
 	    KeyRangeRef(mapPrefix.toString() + maxRange.begin.toString(), mapPrefix.toString() + maxRange.end.toString());
 
-	state vector<Future<RangeResult>> keys;
+	state std::vector<Future<RangeResult>> keys;
 	keys.push_back(
 	    tr->getRange(lastLessThan(withPrefix.begin), firstGreaterOrEqual(withPrefix.begin), 1, Snapshot::True));
 	keys.push_back(
@@ -111,7 +111,7 @@ public:
 
 class KeyRangeActorMap {
 public:
-	void getRangesAffectedByInsertion(const KeyRangeRef& keys, vector<KeyRange>& affectedRanges);
+	void getRangesAffectedByInsertion(const KeyRangeRef& keys, std::vector<KeyRange>& affectedRanges);
 	void insert(const KeyRangeRef& keys, const Future<Void>& value) { map.insert(keys, value); }
 	void cancel(const KeyRangeRef& keys) { insert(keys, Future<Void>()); }
 	bool liveActorAt(const KeyRef& key) {
@@ -150,6 +150,28 @@ std::map<std::string, std::string> configForToken(std::string const& mode) {
 			}
 			out[p + key] = value;
 		}
+		if (key == "perpetual_storage_wiggle_locality") {
+			if (!isValidPerpetualStorageWiggleLocality(value)) {
+				printf("Error: perpetual_storage_wiggle_locality should be in <locality_key>:<locality_value> "
+				       "format or enter 0 to disable the locality match for wiggling.\n");
+				return out;
+			}
+			out[p + key] = value;
+		}
+		if (key == "storage_migration_type") {
+			StorageMigrationType type;
+			if (value == "disabled") {
+				type = StorageMigrationType::DISABLED;
+			} else if (value == "aggressive") {
+				type = StorageMigrationType::AGGRESSIVE;
+			} else if (value == "gradual") {
+				type = StorageMigrationType::GRADUAL;
+			} else {
+				printf("Error: Only disabled|aggressive|gradual are valid for storage_migration_mode.\n");
+				return out;
+			}
+			out[p + key] = format("%d", type);
+		}
 		return out;
 	}
 
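The observable effect of the two new configForToken branches, written out as input/output pairs. The \xff/conf/ prefix and the numeric encoding of GRADUAL are inferred from configKeysPrefix and the StorageMigrationType enum earlier in this diff, so treat the exact strings as assumptions rather than test output from this commit.

    // Illustrative expectations only:
    //   configForToken("storage_migration_type=gradual")
    //     -> { "\xff/conf/storage_migration_type" : "3" }   // GRADUAL == 3
    //   configForToken("perpetual_storage_wiggle_locality=zoneid:z1")
    //     -> { "\xff/conf/perpetual_storage_wiggle_locality" : "zoneid:z1" }
    //   configForToken("storage_migration_type=bogus")
    //     -> prints an error and returns without adding the key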
@@ -413,266 +435,6 @@ ACTOR Future<DatabaseConfiguration> getDatabaseConfiguration(Database cx) {
 	}
 }
 
-ACTOR Future<ConfigurationResult> changeConfig(Database cx, std::map<std::string, std::string> m, bool force) {
-	state StringRef initIdKey = LiteralStringRef("\xff/init_id");
-	state Transaction tr(cx);
-
-	if (!m.size()) {
-		return ConfigurationResult::NO_OPTIONS_PROVIDED;
-	}
-
-	// make sure we have essential configuration options
-	std::string initKey = configKeysPrefix.toString() + "initialized";
-	state bool creating = m.count(initKey) != 0;
-	state Optional<UID> locked;
-	{
-		auto iter = m.find(databaseLockedKey.toString());
-		if (iter != m.end()) {
-			if (!creating) {
-				return ConfigurationResult::LOCKED_NOT_NEW;
-			}
-			locked = UID::fromString(iter->second);
-			m.erase(iter);
-		}
-	}
-	if (creating) {
-		m[initIdKey.toString()] = deterministicRandom()->randomUniqueID().toString();
-		if (!isCompleteConfiguration(m)) {
-			return ConfigurationResult::INCOMPLETE_CONFIGURATION;
-		}
-	}
-
-	state Future<Void> tooLong = delay(60);
-	state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
-	state bool oldReplicationUsesDcId = false;
-	loop {
-		try {
-			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
-			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
-			tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
-
-			if (!creating && !force) {
-				state Future<RangeResult> fConfig = tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY);
-				state Future<vector<ProcessData>> fWorkers = getWorkers(&tr);
-				wait(success(fConfig) || tooLong);
-
-				if (!fConfig.isReady()) {
-					return ConfigurationResult::DATABASE_UNAVAILABLE;
-				}
-
-				if (fConfig.isReady()) {
-					ASSERT(fConfig.get().size() < CLIENT_KNOBS->TOO_MANY);
-					state DatabaseConfiguration oldConfig;
-					oldConfig.fromKeyValues((VectorRef<KeyValueRef>)fConfig.get());
-					state DatabaseConfiguration newConfig = oldConfig;
-					for (auto kv : m) {
-						newConfig.set(kv.first, kv.second);
-					}
-					if (!newConfig.isValid()) {
-						return ConfigurationResult::INVALID_CONFIGURATION;
-					}
-
-					if (newConfig.tLogPolicy->attributeKeys().count("dcid") && newConfig.regions.size() > 0) {
-						return ConfigurationResult::REGION_REPLICATION_MISMATCH;
-					}
-
-					oldReplicationUsesDcId =
-					    oldReplicationUsesDcId || oldConfig.tLogPolicy->attributeKeys().count("dcid");
-
-					if (oldConfig.usableRegions != newConfig.usableRegions) {
-						// cannot change region configuration
-						std::map<Key, int32_t> dcId_priority;
-						for (auto& it : newConfig.regions) {
-							dcId_priority[it.dcId] = it.priority;
-						}
-						for (auto& it : oldConfig.regions) {
-							if (!dcId_priority.count(it.dcId) || dcId_priority[it.dcId] != it.priority) {
-								return ConfigurationResult::REGIONS_CHANGED;
-							}
-						}
-
-						// must only have one region with priority >= 0
-						int activeRegionCount = 0;
-						for (auto& it : newConfig.regions) {
-							if (it.priority >= 0) {
-								activeRegionCount++;
-							}
-						}
-						if (activeRegionCount > 1) {
-							return ConfigurationResult::MULTIPLE_ACTIVE_REGIONS;
-						}
-					}
-
-					state Future<RangeResult> fServerList = (newConfig.regions.size())
-					                                            ? tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY)
-					                                            : Future<RangeResult>();
-
-					if (newConfig.usableRegions == 2) {
-						if (oldReplicationUsesDcId) {
-							state Future<RangeResult> fLocalityList =
-							    tr.getRange(tagLocalityListKeys, CLIENT_KNOBS->TOO_MANY);
-							wait(success(fLocalityList) || tooLong);
-							if (!fLocalityList.isReady()) {
-								return ConfigurationResult::DATABASE_UNAVAILABLE;
-							}
-							RangeResult localityList = fLocalityList.get();
-							ASSERT(!localityList.more && localityList.size() < CLIENT_KNOBS->TOO_MANY);
-
-							std::set<Key> localityDcIds;
-							for (auto& s : localityList) {
-								auto dc = decodeTagLocalityListKey(s.key);
-								if (dc.present()) {
-									localityDcIds.insert(dc.get());
-								}
-							}
-
-							for (auto& it : newConfig.regions) {
-								if (localityDcIds.count(it.dcId) == 0) {
-									return ConfigurationResult::DCID_MISSING;
-								}
-							}
-						} else {
-							// all regions with priority >= 0 must be fully replicated
-							state std::vector<Future<Optional<Value>>> replicasFutures;
-							for (auto& it : newConfig.regions) {
-								if (it.priority >= 0) {
-									replicasFutures.push_back(tr.get(datacenterReplicasKeyFor(it.dcId)));
-								}
-							}
-							wait(waitForAll(replicasFutures) || tooLong);
-
-							for (auto& it : replicasFutures) {
-								if (!it.isReady()) {
-									return ConfigurationResult::DATABASE_UNAVAILABLE;
-								}
-								if (!it.get().present()) {
-									return ConfigurationResult::REGION_NOT_FULLY_REPLICATED;
-								}
-							}
-						}
-					}
-
-					if (newConfig.regions.size()) {
-						// all storage servers must be in one of the regions
-						wait(success(fServerList) || tooLong);
-						if (!fServerList.isReady()) {
-							return ConfigurationResult::DATABASE_UNAVAILABLE;
-						}
-						RangeResult serverList = fServerList.get();
-						ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY);
-
-						std::set<Key> newDcIds;
-						for (auto& it : newConfig.regions) {
-							newDcIds.insert(it.dcId);
-						}
-						std::set<Optional<Key>> missingDcIds;
-						for (auto& s : serverList) {
-							auto ssi = decodeServerListValue(s.value);
-							if (!ssi.locality.dcId().present() || !newDcIds.count(ssi.locality.dcId().get())) {
-								missingDcIds.insert(ssi.locality.dcId());
-							}
-						}
-						if (missingDcIds.size() > (oldReplicationUsesDcId ? 1 : 0)) {
-							return ConfigurationResult::STORAGE_IN_UNKNOWN_DCID;
-						}
-					}
-
-					wait(success(fWorkers) || tooLong);
-					if (!fWorkers.isReady()) {
-						return ConfigurationResult::DATABASE_UNAVAILABLE;
-					}
-
-					if (newConfig.regions.size()) {
-						std::map<Optional<Key>, std::set<Optional<Key>>> dcId_zoneIds;
-						for (auto& it : fWorkers.get()) {
-							if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
-								dcId_zoneIds[it.locality.dcId()].insert(it.locality.zoneId());
-							}
-						}
-						for (auto& region : newConfig.regions) {
-							if (dcId_zoneIds[region.dcId].size() <
-							    std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
-								return ConfigurationResult::NOT_ENOUGH_WORKERS;
-							}
-							if (region.satelliteTLogReplicationFactor > 0 && region.priority >= 0) {
-								int totalSatelliteProcesses = 0;
-								for (auto& sat : region.satellites) {
-									totalSatelliteProcesses += dcId_zoneIds[sat.dcId].size();
-								}
-								if (totalSatelliteProcesses < region.satelliteTLogReplicationFactor) {
-									return ConfigurationResult::NOT_ENOUGH_WORKERS;
-								}
-							}
-						}
-					} else {
-						std::set<Optional<Key>> zoneIds;
-						for (auto& it : fWorkers.get()) {
-							if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
-								zoneIds.insert(it.locality.zoneId());
-							}
-						}
-						if (zoneIds.size() < std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
-							return ConfigurationResult::NOT_ENOUGH_WORKERS;
-						}
-					}
-				}
-			}
-			if (creating) {
-				tr.setOption(FDBTransactionOptions::INITIALIZE_NEW_DATABASE);
-				tr.addReadConflictRange(singleKeyRange(initIdKey));
-			} else if (m.size()) {
-				// might be used in an emergency transaction, so make sure it is retry-self-conflicting and
-				// CAUSAL_WRITE_RISKY
-				tr.setOption(FDBTransactionOptions::CAUSAL_WRITE_RISKY);
-				tr.addReadConflictRange(singleKeyRange(m.begin()->first));
-			}
-
-			if (locked.present()) {
-				ASSERT(creating);
-				tr.atomicOp(databaseLockedKey,
-				            BinaryWriter::toValue(locked.get(), Unversioned())
-				                .withPrefix(LiteralStringRef("0123456789"))
-				                .withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
-				            MutationRef::SetVersionstampedValue);
-			}
-
-			for (auto i = m.begin(); i != m.end(); ++i) {
-				tr.set(StringRef(i->first), StringRef(i->second));
-			}
-
-			tr.addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
-			tr.set(moveKeysLockOwnerKey, versionKey);
-
-			wait(tr.commit());
-			break;
-		} catch (Error& e) {
-			state Error e1(e);
-			if ((e.code() == error_code_not_committed || e.code() == error_code_transaction_too_old) && creating) {
-				// The database now exists. Determine whether we created it or it was already existing/created by
-				// someone else. The latter is an error.
-				tr.reset();
-				loop {
-					try {
-						tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
-						tr.setOption(FDBTransactionOptions::LOCK_AWARE);
-						tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
-
-						Optional<Value> v = wait(tr.get(initIdKey));
-						if (v != m[initIdKey.toString()])
-							return ConfigurationResult::DATABASE_ALREADY_CREATED;
-						else
-							return ConfigurationResult::DATABASE_CREATED;
-					} catch (Error& e2) {
-						wait(tr.onError(e2));
-					}
-				}
-			}
-			wait(tr.onError(e1));
-		}
-	}
-	return ConfigurationResult::SUCCESS;
-}
-
 ConfigureAutoResult parseConfig(StatusObject const& status) {
 	ConfigureAutoResult result;
 	StatusObjectReader statusObj(status);
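This large deletion is a relocation, not a removal: the call site updated earlier in this diff (ManagementAPI::changeConfig(cx.getReference(), ...)) indicates changeConfig now lives under a ManagementAPI namespace, presumably templated over the database type so the multi-version client can use it as well. The exact new signature is not visible in this section; the sketch below is only an assumption about its shape.

    // Assumed shape only; not shown in this section of the diff.
    namespace ManagementAPI {
    template <class DB>
    Future<ConfigurationResult> changeConfig(Reference<DB> db, std::string const& modes, bool force);
    } // namespace ManagementAPI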
@ -942,97 +704,7 @@ ConfigureAutoResult parseConfig(StatusObject const& status) {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
ACTOR Future<ConfigurationResult> autoConfig(Database cx, ConfigureAutoResult conf) {
|
ACTOR Future<std::vector<ProcessData>> getWorkers(Transaction* tr) {
|
||||||
state Transaction tr(cx);
|
|
||||||
state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
|
|
||||||
|
|
||||||
if (!conf.address_class.size())
|
|
||||||
return ConfigurationResult::INCOMPLETE_CONFIGURATION; // FIXME: correct return type
|
|
||||||
|
|
||||||
loop {
|
|
||||||
try {
|
|
||||||
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
||||||
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
||||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
|
||||||
tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
|
|
||||||
|
|
||||||
vector<ProcessData> workers = wait(getWorkers(&tr));
|
|
||||||
std::map<NetworkAddress, Optional<Standalone<StringRef>>> address_processId;
|
|
||||||
for (auto& w : workers) {
|
|
||||||
address_processId[w.address] = w.locality.processId();
|
|
||||||
}
|
|
||||||
|
|
||||||
for (auto& it : conf.address_class) {
|
|
||||||
if (it.second.classSource() == ProcessClass::CommandLineSource) {
|
|
||||||
tr.clear(processClassKeyFor(address_processId[it.first].get()));
|
|
||||||
} else {
|
|
||||||
tr.set(processClassKeyFor(address_processId[it.first].get()), processClassValue(it.second));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (conf.address_class.size())
|
|
||||||
tr.set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
|
|
||||||
|
|
||||||
if (conf.auto_logs != conf.old_logs)
|
|
||||||
tr.set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
|
|
||||||
|
|
||||||
if (conf.auto_commit_proxies != conf.old_commit_proxies)
|
|
||||||
tr.set(configKeysPrefix.toString() + "auto_commit_proxies", format("%d", conf.auto_commit_proxies));
|
|
||||||
|
|
||||||
if (conf.auto_grv_proxies != conf.old_grv_proxies)
|
|
||||||
tr.set(configKeysPrefix.toString() + "auto_grv_proxies", format("%d", conf.auto_grv_proxies));
|
|
||||||
|
|
||||||
if (conf.auto_resolvers != conf.old_resolvers)
|
|
||||||
tr.set(configKeysPrefix.toString() + "auto_resolvers", format("%d", conf.auto_resolvers));
|
|
||||||
|
|
||||||
if (conf.auto_replication != conf.old_replication) {
|
|
||||||
std::vector<StringRef> modes;
|
|
||||||
modes.push_back(conf.auto_replication);
|
|
||||||
std::map<std::string, std::string> m;
|
|
||||||
auto r = buildConfiguration(modes, m);
|
|
||||||
if (r != ConfigurationResult::SUCCESS)
|
|
||||||
return r;
|
|
||||||
|
|
||||||
for (auto& kv : m)
|
|
||||||
tr.set(kv.first, kv.second);
|
|
||||||
}
|
|
||||||
|
|
||||||
tr.addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
|
|
||||||
tr.set(moveKeysLockOwnerKey, versionKey);
|
|
||||||
|
|
||||||
wait(tr.commit());
|
|
||||||
return ConfigurationResult::SUCCESS;
|
|
||||||
} catch (Error& e) {
|
|
||||||
wait(tr.onError(e));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Future<ConfigurationResult> changeConfig(Database const& cx,
|
|
||||||
std::vector<StringRef> const& modes,
|
|
||||||
Optional<ConfigureAutoResult> const& conf,
|
|
||||||
bool force) {
|
|
||||||
if (modes.size() && modes[0] == LiteralStringRef("auto") && conf.present()) {
|
|
||||||
return autoConfig(cx, conf.get());
|
|
||||||
}
|
|
||||||
|
|
||||||
std::map<std::string, std::string> m;
|
|
||||||
auto r = buildConfiguration(modes, m);
|
|
||||||
if (r != ConfigurationResult::SUCCESS)
|
|
||||||
return r;
|
|
||||||
return changeConfig(cx, m, force);
|
|
||||||
}
|
|
||||||
|
|
||||||
Future<ConfigurationResult> changeConfig(Database const& cx, std::string const& modes, bool force) {
|
|
||||||
TraceEvent("ChangeConfig").detail("Mode", modes);
|
|
||||||
std::map<std::string, std::string> m;
|
|
||||||
auto r = buildConfiguration(modes, m);
|
|
||||||
if (r != ConfigurationResult::SUCCESS)
|
|
||||||
return r;
|
|
||||||
return changeConfig(cx, m, force);
|
|
||||||
}
|
|
||||||
|
|
||||||
ACTOR Future<vector<ProcessData>> getWorkers(Transaction* tr) {
|
|
||||||
state Future<RangeResult> processClasses = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY);
|
state Future<RangeResult> processClasses = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY);
|
||||||
state Future<RangeResult> processData = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY);
|
state Future<RangeResult> processData = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY);
|
||||||
|
|
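The hunk above deletes the Database-based autoConfig and the token/vector changeConfig overloads from this file; templated equivalents reappear in the header hunks further down. For orientation, a minimal sketch of the flow those overloads implemented, using only names visible in the removed code (the mode tokens are an assumed example, not from this diff):

    // Inside an ACTOR, with Database cx in scope; "single"/"ssd" are assumed example tokens.
    std::vector<StringRef> modes = { LiteralStringRef("single"), LiteralStringRef("ssd") };
    std::map<std::string, std::string> m;
    if (buildConfiguration(modes, m) == ConfigurationResult::SUCCESS) {
        // m now holds the parsed configuration key/value pairs; commit them atomically
        ConfigurationResult res = wait(changeConfig(cx, m, /*force=*/false));
    }
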
@@ -1063,14 +735,14 @@ ACTOR Future<vector<ProcessData>> getWorkers(Transaction* tr) {
     return results;
 }
 
-ACTOR Future<vector<ProcessData>> getWorkers(Database cx) {
+ACTOR Future<std::vector<ProcessData>> getWorkers(Database cx) {
     state Transaction tr(cx);
     loop {
         try {
             tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
             tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); // necessary?
             tr.setOption(FDBTransactionOptions::LOCK_AWARE);
-            vector<ProcessData> workers = wait(getWorkers(&tr));
+            std::vector<ProcessData> workers = wait(getWorkers(&tr));
             return workers;
         } catch (Error& e) {
             wait(tr.onError(e));
@@ -1148,7 +820,7 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
         }
     }
 
-    vector<Future<Optional<LeaderInfo>>> leaderServers;
+    std::vector<Future<Optional<LeaderInfo>>> leaderServers;
     ClientCoordinators coord(Reference<ClusterConnectionFile>(new ClusterConnectionFile(conn)));
 
     leaderServers.reserve(coord.clientLeaderServers.size());
@@ -1234,7 +906,7 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
     TEST(old.clusterKeyName() != conn.clusterKeyName()); // Quorum change with new name
     TEST(old.clusterKeyName() == conn.clusterKeyName()); // Quorum change with unchanged name
 
-    state vector<Future<Optional<LeaderInfo>>> leaderServers;
+    state std::vector<Future<Optional<LeaderInfo>>> leaderServers;
     state ClientCoordinators coord(Reference<ClusterConnectionFile>(new ClusterConnectionFile(conn)));
     // check if allowed to modify the cluster descriptor
     if (!change->getDesiredClusterKeyName().empty()) {
@@ -1266,24 +938,24 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
 }
 
 struct SpecifiedQuorumChange final : IQuorumChange {
-    vector<NetworkAddress> desired;
-    explicit SpecifiedQuorumChange(vector<NetworkAddress> const& desired) : desired(desired) {}
-    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
-                                                          vector<NetworkAddress> oldCoordinators,
+    std::vector<NetworkAddress> desired;
+    explicit SpecifiedQuorumChange(std::vector<NetworkAddress> const& desired) : desired(desired) {}
+    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
+                                                               std::vector<NetworkAddress> oldCoordinators,
                                                           Reference<ClusterConnectionFile>,
                                                           CoordinatorsResult&) override {
         return desired;
     }
 };
-Reference<IQuorumChange> specifiedQuorumChange(vector<NetworkAddress> const& addresses) {
+Reference<IQuorumChange> specifiedQuorumChange(std::vector<NetworkAddress> const& addresses) {
     return Reference<IQuorumChange>(new SpecifiedQuorumChange(addresses));
 }
 
 struct NoQuorumChange final : IQuorumChange {
-    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
-                                                          vector<NetworkAddress> oldCoordinators,
+    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
+                                                               std::vector<NetworkAddress> oldCoordinators,
                                                           Reference<ClusterConnectionFile>,
                                                           CoordinatorsResult&) override {
         return oldCoordinators;
     }
 };
@@ -1296,10 +968,10 @@ struct NameQuorumChange final : IQuorumChange {
     Reference<IQuorumChange> otherChange;
     explicit NameQuorumChange(std::string const& newName, Reference<IQuorumChange> const& otherChange)
       : newName(newName), otherChange(otherChange) {}
-    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
-                                                          vector<NetworkAddress> oldCoordinators,
+    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
+                                                               std::vector<NetworkAddress> oldCoordinators,
                                                           Reference<ClusterConnectionFile> cf,
                                                           CoordinatorsResult& t) override {
         return otherChange->getDesiredCoordinators(tr, oldCoordinators, cf, t);
     }
     std::string getDesiredClusterKeyName() const override { return newName; }
@@ -1312,10 +984,10 @@ struct AutoQuorumChange final : IQuorumChange {
     int desired;
     explicit AutoQuorumChange(int desired) : desired(desired) {}
 
-    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
-                                                          vector<NetworkAddress> oldCoordinators,
+    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
+                                                               std::vector<NetworkAddress> oldCoordinators,
                                                           Reference<ClusterConnectionFile> ccf,
                                                           CoordinatorsResult& err) override {
         return getDesired(Reference<AutoQuorumChange>::addRef(this), tr, oldCoordinators, ccf, &err);
     }
 
@@ -1333,7 +1005,7 @@ struct AutoQuorumChange final : IQuorumChange {
 
     ACTOR static Future<bool> isAcceptable(AutoQuorumChange* self,
                                            Transaction* tr,
-                                           vector<NetworkAddress> oldCoordinators,
+                                           std::vector<NetworkAddress> oldCoordinators,
                                            Reference<ClusterConnectionFile> ccf,
                                            int desiredCount,
                                            std::set<AddressExclusion>* excluded) {
@@ -1345,14 +1017,14 @@ struct AutoQuorumChange final : IQuorumChange {
 
         // Check availability
         ClientCoordinators coord(ccf);
-        vector<Future<Optional<LeaderInfo>>> leaderServers;
+        std::vector<Future<Optional<LeaderInfo>>> leaderServers;
         leaderServers.reserve(coord.clientLeaderServers.size());
         for (int i = 0; i < coord.clientLeaderServers.size(); i++) {
             leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader,
                                                        GetLeaderRequest(coord.clusterKey, UID()),
                                                        TaskPriority::CoordinationReply));
         }
-        Optional<vector<Optional<LeaderInfo>>> results =
+        Optional<std::vector<Optional<LeaderInfo>>> results =
             wait(timeout(getAll(leaderServers), CLIENT_KNOBS->IS_ACCEPTABLE_DELAY));
         if (!results.present()) {
             return false;
@@ -1379,11 +1051,11 @@ struct AutoQuorumChange final : IQuorumChange {
         return true; // The status quo seems fine
     }
 
-    ACTOR static Future<vector<NetworkAddress>> getDesired(Reference<AutoQuorumChange> self,
+    ACTOR static Future<std::vector<NetworkAddress>> getDesired(Reference<AutoQuorumChange> self,
                                                            Transaction* tr,
-                                                           vector<NetworkAddress> oldCoordinators,
+                                                           std::vector<NetworkAddress> oldCoordinators,
                                                            Reference<ClusterConnectionFile> ccf,
                                                            CoordinatorsResult* err) {
         state int desiredCount = self->desired;
 
         if (desiredCount == -1) {
@@ -1394,8 +1066,8 @@ struct AutoQuorumChange final : IQuorumChange {
         std::vector<AddressExclusion> excl = wait(getExcludedServers(tr));
         state std::set<AddressExclusion> excluded(excl.begin(), excl.end());
 
-        vector<ProcessData> _workers = wait(getWorkers(tr));
-        state vector<ProcessData> workers = _workers;
+        std::vector<ProcessData> _workers = wait(getWorkers(tr));
+        state std::vector<ProcessData> workers = _workers;
 
         std::map<NetworkAddress, LocalityData> addr_locality;
         for (auto w : workers)
@@ -1431,7 +1103,7 @@ struct AutoQuorumChange final : IQuorumChange {
                     .detail("DesiredCoordinators", desiredCount)
                     .detail("CurrentCoordinators", oldCoordinators.size());
                 *err = CoordinatorsResult::NOT_ENOUGH_MACHINES;
-                return vector<NetworkAddress>();
+                return std::vector<NetworkAddress>();
             }
             chosen.resize((chosen.size() - 1) | 1);
         }
@@ -1443,11 +1115,11 @@ struct AutoQuorumChange final : IQuorumChange {
     // (1) the number of workers at each locality type (e.g., dcid) <= desiredCount; and
     // (2) prefer workers at a locality where less workers has been chosen than other localities: evenly distribute
     // workers.
-    void addDesiredWorkers(vector<NetworkAddress>& chosen,
-                           const vector<ProcessData>& workers,
+    void addDesiredWorkers(std::vector<NetworkAddress>& chosen,
+                           const std::vector<ProcessData>& workers,
                            int desiredCount,
                            const std::set<AddressExclusion>& excluded) {
-        vector<ProcessData> remainingWorkers(workers);
+        std::vector<ProcessData> remainingWorkers(workers);
         deterministicRandom()->randomShuffle(remainingWorkers);
 
         std::partition(remainingWorkers.begin(), remainingWorkers.end(), [](const ProcessData& data) {
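The comment above describes addDesiredWorkers' locality policy. A self-contained sketch of constraint (1), with simplified stand-in types; this is an illustration of the idea, not the FDB implementation:

    #include <map>
    #include <string>
    #include <vector>

    struct WorkerLoc {
        std::map<std::string, std::string> locality; // e.g. {"dcid":"dc1","zoneid":"z3"}
    };

    // Walk a pre-shuffled worker list and never let more than desiredCount chosen
    // workers share one value of any locality field (dcid, data_hall, zoneid, machineid).
    std::vector<WorkerLoc> pickSpread(const std::vector<WorkerLoc>& shuffled, size_t desiredCount) {
        std::map<std::string, std::map<std::string, size_t>> chosenCounts; // field -> value -> count
        std::vector<WorkerLoc> chosen;
        for (const auto& w : shuffled) {
            bool acceptable = true;
            for (const auto& [field, value] : w.locality)
                if (chosenCounts[field][value] >= desiredCount) // constraint (1)
                    acceptable = false;
            if (!acceptable)
                continue;
            for (const auto& [field, value] : w.locality)
                ++chosenCounts[field][value];
            chosen.push_back(w);
            if (chosen.size() == desiredCount)
                break;
        }
        return chosen;
    }
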
@@ -1470,10 +1142,10 @@ struct AutoQuorumChange final : IQuorumChange {
         std::map<StringRef, std::map<StringRef, int>> currentCounts;
         std::map<StringRef, int> hardLimits;
 
-        vector<StringRef> fields({ LiteralStringRef("dcid"),
+        std::vector<StringRef> fields({ LiteralStringRef("dcid"),
                                    LiteralStringRef("data_hall"),
                                    LiteralStringRef("zoneid"),
                                    LiteralStringRef("machineid") });
 
         for (auto field = fields.begin(); field != fields.end(); field++) {
             if (field->toString() == "zoneid") {
@@ -1537,7 +1209,7 @@ Reference<IQuorumChange> autoQuorumChange(int desired) {
     return Reference<IQuorumChange>(new AutoQuorumChange(desired));
 }
 
-void excludeServers(Transaction& tr, vector<AddressExclusion>& servers, bool failed) {
+void excludeServers(Transaction& tr, std::vector<AddressExclusion>& servers, bool failed) {
     tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
     tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
     tr.setOption(FDBTransactionOptions::LOCK_AWARE);
@@ -1556,7 +1228,7 @@ void excludeServers(Transaction& tr, vector<AddressExclusion>& servers, bool fai
     TraceEvent("ExcludeServersCommit").detail("Servers", describe(servers)).detail("ExcludeFailed", failed);
 }
 
-ACTOR Future<Void> excludeServers(Database cx, vector<AddressExclusion> servers, bool failed) {
+ACTOR Future<Void> excludeServers(Database cx, std::vector<AddressExclusion> servers, bool failed) {
     if (cx->apiVersionAtLeast(700)) {
         state ReadYourWritesTransaction ryw(cx);
         loop {
@@ -1659,7 +1331,7 @@ ACTOR Future<Void> excludeLocalities(Database cx, std::unordered_set<std::string
     }
 }
 
-ACTOR Future<Void> includeServers(Database cx, vector<AddressExclusion> servers, bool failed) {
+ACTOR Future<Void> includeServers(Database cx, std::vector<AddressExclusion> servers, bool failed) {
     state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
     if (cx->apiVersionAtLeast(700)) {
         state ReadYourWritesTransaction ryw(cx);
@@ -1762,7 +1434,7 @@ ACTOR Future<Void> includeServers(Database cx, vector<AddressExclusion> servers,
 
 // Remove the given localities from the exclusion list.
 // include localities by clearing the keys.
-ACTOR Future<Void> includeLocalities(Database cx, vector<std::string> localities, bool failed, bool includeAll) {
+ACTOR Future<Void> includeLocalities(Database cx, std::vector<std::string> localities, bool failed, bool includeAll) {
     state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
     if (cx->apiVersionAtLeast(700)) {
         state ReadYourWritesTransaction ryw(cx);
@@ -1856,7 +1528,7 @@ ACTOR Future<Void> setClass(Database cx, AddressExclusion server, ProcessClass p
             tr.setOption(FDBTransactionOptions::LOCK_AWARE);
             tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
 
-            vector<ProcessData> workers = wait(getWorkers(&tr));
+            std::vector<ProcessData> workers = wait(getWorkers(&tr));
 
             bool foundChange = false;
             for (int i = 0; i < workers.size(); i++) {
@@ -1881,13 +1553,13 @@ ACTOR Future<Void> setClass(Database cx, AddressExclusion server, ProcessClass p
         }
     }
 
-ACTOR Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr) {
+ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Transaction* tr) {
     state RangeResult r = wait(tr->getRange(excludedServersKeys, CLIENT_KNOBS->TOO_MANY));
     ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY);
     state RangeResult r2 = wait(tr->getRange(failedServersKeys, CLIENT_KNOBS->TOO_MANY));
     ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);
 
-    vector<AddressExclusion> exclusions;
+    std::vector<AddressExclusion> exclusions;
     for (auto i = r.begin(); i != r.end(); ++i) {
         auto a = decodeExcludedServersKey(i->key);
         if (a.isValid())
@@ -1902,14 +1574,14 @@ ACTOR Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr) {
     return exclusions;
 }
 
-ACTOR Future<vector<AddressExclusion>> getExcludedServers(Database cx) {
+ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Database cx) {
     state Transaction tr(cx);
     loop {
         try {
             tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
             tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); // necessary?
             tr.setOption(FDBTransactionOptions::LOCK_AWARE);
-            vector<AddressExclusion> exclusions = wait(getExcludedServers(&tr));
+            std::vector<AddressExclusion> exclusions = wait(getExcludedServers(&tr));
             return exclusions;
         } catch (Error& e) {
             wait(tr.onError(e));
@@ -1918,13 +1590,13 @@ ACTOR Future<vector<AddressExclusion>> getExcludedServers(Database cx) {
     }
 }
 
 // Get the current list of excluded localities by reading the keys.
-ACTOR Future<vector<std::string>> getExcludedLocalities(Transaction* tr) {
+ACTOR Future<std::vector<std::string>> getExcludedLocalities(Transaction* tr) {
     state RangeResult r = wait(tr->getRange(excludedLocalityKeys, CLIENT_KNOBS->TOO_MANY));
     ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY);
     state RangeResult r2 = wait(tr->getRange(failedLocalityKeys, CLIENT_KNOBS->TOO_MANY));
     ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);
 
-    vector<std::string> excludedLocalities;
+    std::vector<std::string> excludedLocalities;
     for (const auto& i : r) {
         auto a = decodeExcludedLocalityKey(i.key);
         excludedLocalities.push_back(a);
@@ -1938,14 +1610,14 @@ ACTOR Future<vector<std::string>> getExcludedLocalities(Transaction* tr) {
 }
 
 // Get the list of excluded localities by reading the keys.
-ACTOR Future<vector<std::string>> getExcludedLocalities(Database cx) {
+ACTOR Future<std::vector<std::string>> getExcludedLocalities(Database cx) {
     state Transaction tr(cx);
     loop {
         try {
             tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
             tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
             tr.setOption(FDBTransactionOptions::LOCK_AWARE);
-            vector<std::string> exclusions = wait(getExcludedLocalities(&tr));
+            std::vector<std::string> exclusions = wait(getExcludedLocalities(&tr));
             return exclusions;
         } catch (Error& e) {
             wait(tr.onError(e));
@@ -2175,7 +1847,7 @@ ACTOR Future<bool> checkForExcludingServersTxActor(ReadYourWritesTransaction* tr
 }
 
 ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Database cx,
-                                                                vector<AddressExclusion> excl,
+                                                                std::vector<AddressExclusion> excl,
                                                                 bool waitForAllExcluded) {
     state std::set<AddressExclusion> exclusions(excl.begin(), excl.end());
     state std::set<NetworkAddress> inProgressExclusion;
@@ -2459,68 +2131,6 @@ ACTOR Future<Void> waitForPrimaryDC(Database cx, StringRef dcId) {
     }
 }
 
-ACTOR Future<Void> changeCachedRange(Database cx, KeyRangeRef range, bool add) {
-    state ReadYourWritesTransaction tr(cx);
-    state KeyRange sysRange = KeyRangeRef(storageCacheKey(range.begin), storageCacheKey(range.end));
-    state KeyRange sysRangeClear = KeyRangeRef(storageCacheKey(range.begin), keyAfter(storageCacheKey(range.end)));
-    state KeyRange privateRange = KeyRangeRef(cacheKeysKey(0, range.begin), cacheKeysKey(0, range.end));
-    state Value trueValue = storageCacheValue(std::vector<uint16_t>{ 0 });
-    state Value falseValue = storageCacheValue(std::vector<uint16_t>{});
-    loop {
-        tr.setOption(FDBTransactionOptions::LOCK_AWARE);
-        tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
-        try {
-            tr.clear(sysRangeClear);
-            tr.clear(privateRange);
-            tr.addReadConflictRange(privateRange);
-            RangeResult previous =
-                wait(tr.getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, Snapshot::True));
-            bool prevIsCached = false;
-            if (!previous.empty()) {
-                std::vector<uint16_t> prevVal;
-                decodeStorageCacheValue(previous[0].value, prevVal);
-                prevIsCached = !prevVal.empty();
-            }
-            if (prevIsCached && !add) {
-                // we need to uncache from here
-                tr.set(sysRange.begin, falseValue);
-                tr.set(privateRange.begin, serverKeysFalse);
-            } else if (!prevIsCached && add) {
-                // we need to cache, starting from here
-                tr.set(sysRange.begin, trueValue);
-                tr.set(privateRange.begin, serverKeysTrue);
-            }
-            RangeResult after = wait(tr.getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, Snapshot::False));
-            bool afterIsCached = false;
-            if (!after.empty()) {
-                std::vector<uint16_t> afterVal;
-                decodeStorageCacheValue(after[0].value, afterVal);
-                afterIsCached = afterVal.empty();
-            }
-            if (afterIsCached && !add) {
-                tr.set(sysRange.end, trueValue);
-                tr.set(privateRange.end, serverKeysTrue);
-            } else if (!afterIsCached && add) {
-                tr.set(sysRange.end, falseValue);
-                tr.set(privateRange.end, serverKeysFalse);
-            }
-            wait(tr.commit());
-            return Void();
-        } catch (Error& e) {
-            state Error err = e;
-            wait(tr.onError(err));
-            TraceEvent(SevDebug, "ChangeCachedRangeError").error(err);
-        }
-    }
-}
-
-Future<Void> addCachedRange(const Database& cx, KeyRangeRef range) {
-    return changeCachedRange(cx, range, true);
-}
-Future<Void> removeCachedRange(const Database& cx, KeyRangeRef range) {
-    return changeCachedRange(cx, range, false);
-}
-
 json_spirit::Value_type normJSONType(json_spirit::Value_type type) {
     if (type == json_spirit::int_type)
         return json_spirit::real_type;
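The 62 lines deleted in the hunk above (changeCachedRange, addCachedRange, removeCachedRange) are not dropped: they return in the header hunks below as templates in a new ManagementAPI namespace, parameterized on the database type. The recurring idiom there is to keep each transaction future alive in a state variable, since the returned Standalone memory must outlive the wait, and to adapt it with safeThreadFutureToFuture. Schematically, using names taken from the hunks below:

    // Schematic only; FutureT, safeThreadFutureToFuture, and the keys come from the diff below.
    state typename DB::TransactionT::template FutureT<RangeResult> fF =
        tr->getRange(configKeys, CLIENT_KNOBS->TOO_MANY);
    RangeResult r = wait(safeThreadFutureToFuture(fF)); // fF also anchors the result's memory
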
@@ -2688,6 +2298,40 @@ bool schemaMatch(json_spirit::mValue const& schemaValue,
     }
 }
 
+std::string ManagementAPI::generateErrorMessage(const CoordinatorsResult& res) {
+    // Note: the error message here should not be changed if possible
+    // If you do change the message here,
+    // please update the corresponding fdbcli code to support both the old and the new message
+
+    std::string msg;
+    switch (res) {
+    case CoordinatorsResult::INVALID_NETWORK_ADDRESSES:
+        msg = "The specified network addresses are invalid";
+        break;
+    case CoordinatorsResult::SAME_NETWORK_ADDRESSES:
+        msg = "No change (existing configuration satisfies request)";
+        break;
+    case CoordinatorsResult::NOT_COORDINATORS:
+        msg = "Coordination servers are not running on the specified network addresses";
+        break;
+    case CoordinatorsResult::DATABASE_UNREACHABLE:
+        msg = "Database unreachable";
+        break;
+    case CoordinatorsResult::BAD_DATABASE_STATE:
+        msg = "The database is in an unexpected state from which changing coordinators might be unsafe";
+        break;
+    case CoordinatorsResult::COORDINATOR_UNREACHABLE:
+        msg = "One of the specified coordinators is unreachable";
+        break;
+    case CoordinatorsResult::NOT_ENOUGH_MACHINES:
+        msg = "Too few fdbserver machines to provide coordination at the current redundancy level";
+        break;
+    default:
+        break;
+    }
+    return msg;
+}
+
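The new ManagementAPI::generateErrorMessage centralizes the coordinator-change error strings so that fdbcli and other callers print identical messages. A hypothetical call site (changeQuorum and autoQuorumChange are declared in the header hunks below):

    // Hypothetical usage, inside an ACTOR with Database cx in scope.
    CoordinatorsResult r = wait(changeQuorum(cx, autoQuorumChange()));
    if (r != CoordinatorsResult::SUCCESS)
        fprintf(stderr, "ERROR: %s\n", ManagementAPI::generateErrorMessage(r).c_str());
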
 TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
     wait(Future<Void>(Void()));
 
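The remaining hunks apply to the ManagementAPI header. Besides the vector to std::vector cleanup, they add the two ConfigurationResult codes used above (STORAGE_MIGRATION_DISABLED, SUCCESS_WARN_PPW_GRADUAL), drop the Database-only changeConfig declarations, and introduce the templated ManagementAPI namespace so one implementation serves both NativeAPI and IClientAPI transactions. A hedged usage sketch for a caller generic over the database type (the mode string is an assumed example; the string overload of buildConfiguration is the one the removed .cpp code called):

    // Inside an ACTOR, for some Reference<DB> db whose DB::TransactionT matches the concept below.
    std::map<std::string, std::string> conf;
    if (buildConfiguration("single ssd", conf) == ConfigurationResult::SUCCESS) {
        ConfigurationResult res = wait(ManagementAPI::changeConfig(db, conf, /*force=*/false));
    }
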
@@ -49,6 +49,7 @@ enum class ConfigurationResult {
     UNKNOWN_OPTION,
     INCOMPLETE_CONFIGURATION,
     INVALID_CONFIGURATION,
+    STORAGE_MIGRATION_DISABLED,
     DATABASE_ALREADY_CREATED,
     DATABASE_CREATED,
     DATABASE_UNAVAILABLE,
@@ -60,6 +61,7 @@ enum class ConfigurationResult {
     REGION_REPLICATION_MISMATCH,
     DCID_MISSING,
     LOCKED_NOT_NEW,
+    SUCCESS_WARN_PPW_GRADUAL,
     SUCCESS,
 };
 
@@ -119,31 +121,17 @@ ConfigurationResult buildConfiguration(
 
 bool isCompleteConfiguration(std::map<std::string, std::string> const& options);
 
-// All versions of changeConfig apply the given set of configuration tokens to the database, and return a
-// ConfigurationResult (or error).
-Future<ConfigurationResult> changeConfig(Database const& cx,
-                                         std::string const& configMode,
-                                         bool force); // Accepts tokens separated by spaces in a single string
-
 ConfigureAutoResult parseConfig(StatusObject const& status);
-Future<ConfigurationResult> changeConfig(Database const& cx,
-                                         std::vector<StringRef> const& modes,
-                                         Optional<ConfigureAutoResult> const& conf,
-                                         bool force); // Accepts a vector of configuration tokens
-ACTOR Future<ConfigurationResult> changeConfig(
-    Database cx,
-    std::map<std::string, std::string> m,
-    bool force); // Accepts a full configuration in key/value format (from buildConfiguration)
 
 ACTOR Future<DatabaseConfiguration> getDatabaseConfiguration(Database cx);
 ACTOR Future<Void> waitForFullReplication(Database cx);
 
 struct IQuorumChange : ReferenceCounted<IQuorumChange> {
     virtual ~IQuorumChange() {}
-    virtual Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
-                                                                  vector<NetworkAddress> oldCoordinators,
+    virtual Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
+                                                                       std::vector<NetworkAddress> oldCoordinators,
                                                                   Reference<ClusterConnectionFile>,
                                                                   CoordinatorsResult&) = 0;
     virtual std::string getDesiredClusterKeyName() const { return std::string(); }
 };
 
@@ -154,14 +142,14 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
 ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChange> change);
 Reference<IQuorumChange> autoQuorumChange(int desired = -1);
 Reference<IQuorumChange> noQuorumChange();
-Reference<IQuorumChange> specifiedQuorumChange(vector<NetworkAddress> const&);
+Reference<IQuorumChange> specifiedQuorumChange(std::vector<NetworkAddress> const&);
 Reference<IQuorumChange> nameQuorumChange(std::string const& name, Reference<IQuorumChange> const& other);
 
 // Exclude the given set of servers from use as state servers. Returns as soon as the change is durable, without
 // necessarily waiting for the servers to be evacuated. A NetworkAddress with a port of 0 means all servers on the
 // given IP.
-ACTOR Future<Void> excludeServers(Database cx, vector<AddressExclusion> servers, bool failed = false);
-void excludeServers(Transaction& tr, vector<AddressExclusion>& servers, bool failed = false);
+ACTOR Future<Void> excludeServers(Database cx, std::vector<AddressExclusion> servers, bool failed = false);
+void excludeServers(Transaction& tr, std::vector<AddressExclusion>& servers, bool failed = false);
 
 // Exclude the servers matching the given set of localities from use as state servers. Returns as soon as the change
 // is durable, without necessarily waiting for the servers to be evacuated.
@@ -170,11 +158,11 @@ void excludeLocalities(Transaction& tr, std::unordered_set<std::string> localiti
 
 // Remove the given servers from the exclusion list. A NetworkAddress with a port of 0 means all servers on the given
 // IP. A NetworkAddress() means all servers (don't exclude anything)
-ACTOR Future<Void> includeServers(Database cx, vector<AddressExclusion> servers, bool failed = false);
+ACTOR Future<Void> includeServers(Database cx, std::vector<AddressExclusion> servers, bool failed = false);
 
 // Remove the given localities from the exclusion list.
 ACTOR Future<Void> includeLocalities(Database cx,
-                                     vector<std::string> localities,
+                                     std::vector<std::string> localities,
                                      bool failed = false,
                                      bool includeAll = false);
 
@@ -183,12 +171,12 @@ ACTOR Future<Void> includeLocalities(Database cx,
 ACTOR Future<Void> setClass(Database cx, AddressExclusion server, ProcessClass processClass);
 
 // Get the current list of excluded servers
-ACTOR Future<vector<AddressExclusion>> getExcludedServers(Database cx);
-ACTOR Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr);
+ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Database cx);
+ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Transaction* tr);
 
 // Get the current list of excluded localities
-ACTOR Future<vector<std::string>> getExcludedLocalities(Database cx);
-ACTOR Future<vector<std::string>> getExcludedLocalities(Transaction* tr);
+ACTOR Future<std::vector<std::string>> getExcludedLocalities(Database cx);
+ACTOR Future<std::vector<std::string>> getExcludedLocalities(Transaction* tr);
 
 std::set<AddressExclusion> getAddressesByLocality(const std::vector<ProcessData>& workers, const std::string& locality);
 
@@ -196,15 +184,15 @@ std::set<AddressExclusion> getAddressesByLocality(const std::vector<ProcessData>
 // true, this actor returns once it is safe to shut down all such machines without impacting fault tolerance, until and
 // unless any of them are explicitly included with includeServers()
 ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Database cx,
-                                                                vector<AddressExclusion> servers,
+                                                                std::vector<AddressExclusion> servers,
                                                                 bool waitForAllExcluded);
 ACTOR Future<bool> checkForExcludingServersTxActor(ReadYourWritesTransaction* tr,
                                                    std::set<AddressExclusion>* exclusions,
                                                    std::set<NetworkAddress>* inProgressExclusion);
 
 // Gets a list of all workers in the cluster (excluding testers)
-ACTOR Future<vector<ProcessData>> getWorkers(Database cx);
-ACTOR Future<vector<ProcessData>> getWorkers(Transaction* tr);
+ACTOR Future<std::vector<ProcessData>> getWorkers(Database cx);
+ACTOR Future<std::vector<ProcessData>> getWorkers(Transaction* tr);
 
 ACTOR Future<Void> timeKeeperSetDisable(Database cx);
 
@@ -248,8 +236,511 @@ bool schemaMatch(json_spirit::mValue const& schema,
 // storage nodes
 ACTOR Future<Void> mgmtSnapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID);
 
-Future<Void> addCachedRange(const Database& cx, KeyRangeRef range);
-Future<Void> removeCachedRange(const Database& cx, KeyRangeRef range);
+// Management API written in template code to support both IClientAPI and NativeAPI
+namespace ManagementAPI {
+
+ACTOR template <class DB>
+Future<Void> changeCachedRange(Reference<DB> db, KeyRangeRef range, bool add) {
+    state Reference<typename DB::TransactionT> tr = db->createTransaction();
+    state KeyRange sysRange = KeyRangeRef(storageCacheKey(range.begin), storageCacheKey(range.end));
+    state KeyRange sysRangeClear = KeyRangeRef(storageCacheKey(range.begin), keyAfter(storageCacheKey(range.end)));
+    state KeyRange privateRange = KeyRangeRef(cacheKeysKey(0, range.begin), cacheKeysKey(0, range.end));
+    state Value trueValue = storageCacheValue(std::vector<uint16_t>{ 0 });
+    state Value falseValue = storageCacheValue(std::vector<uint16_t>{});
+    loop {
+        tr->setOption(FDBTransactionOptions::LOCK_AWARE);
+        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
+        try {
+            tr->clear(sysRangeClear);
+            tr->clear(privateRange);
+            tr->addReadConflictRange(privateRange);
+            // hold the returned standalone object's memory
+            state typename DB::TransactionT::template FutureT<RangeResult> previousFuture =
+                tr->getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, Snapshot::False, Reverse::True);
+            RangeResult previous = wait(safeThreadFutureToFuture(previousFuture));
+            bool prevIsCached = false;
+            if (!previous.empty()) {
+                std::vector<uint16_t> prevVal;
+                decodeStorageCacheValue(previous[0].value, prevVal);
+                prevIsCached = !prevVal.empty();
+            }
+            if (prevIsCached && !add) {
+                // we need to uncache from here
+                tr->set(sysRange.begin, falseValue);
+                tr->set(privateRange.begin, serverKeysFalse);
+            } else if (!prevIsCached && add) {
+                // we need to cache, starting from here
+                tr->set(sysRange.begin, trueValue);
+                tr->set(privateRange.begin, serverKeysTrue);
+            }
+            // hold the returned standalone object's memory
+            state typename DB::TransactionT::template FutureT<RangeResult> afterFuture =
+                tr->getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, Snapshot::False, Reverse::False);
+            RangeResult after = wait(safeThreadFutureToFuture(afterFuture));
+            bool afterIsCached = false;
+            if (!after.empty()) {
+                std::vector<uint16_t> afterVal;
+                decodeStorageCacheValue(after[0].value, afterVal);
+                afterIsCached = afterVal.empty();
+            }
+            if (afterIsCached && !add) {
+                tr->set(sysRange.end, trueValue);
+                tr->set(privateRange.end, serverKeysTrue);
+            } else if (!afterIsCached && add) {
+                tr->set(sysRange.end, falseValue);
+                tr->set(privateRange.end, serverKeysFalse);
+            }
+            wait(safeThreadFutureToFuture(tr->commit()));
+            return Void();
+        } catch (Error& e) {
+            state Error err = e;
+            wait(safeThreadFutureToFuture(tr->onError(e)));
+            TraceEvent(SevDebug, "ChangeCachedRangeError").error(err);
+        }
+    }
+}
+
+template <class DB>
+Future<Void> addCachedRange(Reference<DB> db, KeyRangeRef range) {
+    return changeCachedRange(db, range, true);
+}
+
+template <class DB>
+Future<Void> removeCachedRange(Reference<DB> db, KeyRangeRef range) {
+    return changeCachedRange(db, range, false);
+}
+
+ACTOR template <class Tr>
+Future<std::vector<ProcessData>> getWorkers(Reference<Tr> tr,
+                                            typename Tr::template FutureT<RangeResult> processClassesF,
+                                            typename Tr::template FutureT<RangeResult> processDataF) {
+    // processClassesF and processDataF are used to hold standalone memory
+    processClassesF = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY);
+    processDataF = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY);
+    state Future<RangeResult> processClasses = safeThreadFutureToFuture(processClassesF);
+    state Future<RangeResult> processData = safeThreadFutureToFuture(processDataF);
+
+    wait(success(processClasses) && success(processData));
+    ASSERT(!processClasses.get().more && processClasses.get().size() < CLIENT_KNOBS->TOO_MANY);
+    ASSERT(!processData.get().more && processData.get().size() < CLIENT_KNOBS->TOO_MANY);
+
+    std::map<Optional<Standalone<StringRef>>, ProcessClass> id_class;
+    for (int i = 0; i < processClasses.get().size(); i++) {
+        id_class[decodeProcessClassKey(processClasses.get()[i].key)] =
+            decodeProcessClassValue(processClasses.get()[i].value);
+    }
+
+    std::vector<ProcessData> results;
+
+    for (int i = 0; i < processData.get().size(); i++) {
+        ProcessData data = decodeWorkerListValue(processData.get()[i].value);
+        ProcessClass processClass = id_class[data.locality.processId()];
+
+        if (processClass.classSource() == ProcessClass::DBSource ||
+            data.processClass.classType() == ProcessClass::UnsetClass)
+            data.processClass = processClass;
+
+        if (data.processClass.classType() != ProcessClass::TesterClass)
+            results.push_back(data);
+    }
+
+    return results;
+}
+
+// All versions of changeConfig apply the given set of configuration tokens to the database, and return a
+// ConfigurationResult (or error).
+
+// Accepts a full configuration in key/value format (from buildConfiguration)
+ACTOR template <class DB>
+Future<ConfigurationResult> changeConfig(Reference<DB> db, std::map<std::string, std::string> m, bool force) {
+    state StringRef initIdKey = LiteralStringRef("\xff/init_id");
+    state Reference<typename DB::TransactionT> tr = db->createTransaction();
+
+    if (!m.size()) {
+        return ConfigurationResult::NO_OPTIONS_PROVIDED;
+    }
+
+    // make sure we have essential configuration options
+    std::string initKey = configKeysPrefix.toString() + "initialized";
+    state bool creating = m.count(initKey) != 0;
+    state Optional<UID> locked;
+    {
+        auto iter = m.find(databaseLockedKey.toString());
+        if (iter != m.end()) {
+            if (!creating) {
+                return ConfigurationResult::LOCKED_NOT_NEW;
+            }
+            locked = UID::fromString(iter->second);
+            m.erase(iter);
+        }
+    }
+    if (creating) {
+        m[initIdKey.toString()] = deterministicRandom()->randomUniqueID().toString();
+        if (!isCompleteConfiguration(m)) {
+            return ConfigurationResult::INCOMPLETE_CONFIGURATION;
+        }
+    }
+
+    state Future<Void> tooLong = delay(60);
+    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
+    state bool oldReplicationUsesDcId = false;
+    state bool warnPPWGradual = false;
+    state bool warnChangeStorageNoMigrate = false;
+    loop {
+        try {
+            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
+            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
+            tr->setOption(FDBTransactionOptions::LOCK_AWARE);
+            tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
+
+            if (!creating && !force) {
+                state typename DB::TransactionT::template FutureT<RangeResult> fConfigF =
+                    tr->getRange(configKeys, CLIENT_KNOBS->TOO_MANY);
+                state Future<RangeResult> fConfig = safeThreadFutureToFuture(fConfigF);
+                state typename DB::TransactionT::template FutureT<RangeResult> processClassesF;
+                state typename DB::TransactionT::template FutureT<RangeResult> processDataF;
+                state Future<std::vector<ProcessData>> fWorkers = getWorkers(tr, processClassesF, processDataF);
+                wait(success(fConfig) || tooLong);
+
+                if (!fConfig.isReady()) {
+                    return ConfigurationResult::DATABASE_UNAVAILABLE;
+                }
+
+                if (fConfig.isReady()) {
+                    ASSERT(fConfig.get().size() < CLIENT_KNOBS->TOO_MANY);
+                    state DatabaseConfiguration oldConfig;
+                    oldConfig.fromKeyValues((VectorRef<KeyValueRef>)fConfig.get());
+                    state DatabaseConfiguration newConfig = oldConfig;
+                    for (auto kv : m) {
+                        newConfig.set(kv.first, kv.second);
+                    }
+                    if (!newConfig.isValid()) {
+                        return ConfigurationResult::INVALID_CONFIGURATION;
+                    }
+
+                    if (newConfig.tLogPolicy->attributeKeys().count("dcid") && newConfig.regions.size() > 0) {
+                        return ConfigurationResult::REGION_REPLICATION_MISMATCH;
+                    }
+
+                    oldReplicationUsesDcId =
+                        oldReplicationUsesDcId || oldConfig.tLogPolicy->attributeKeys().count("dcid");
+
+                    if (oldConfig.usableRegions != newConfig.usableRegions) {
+                        // cannot change region configuration
+                        std::map<Key, int32_t> dcId_priority;
+                        for (auto& it : newConfig.regions) {
+                            dcId_priority[it.dcId] = it.priority;
+                        }
+                        for (auto& it : oldConfig.regions) {
+                            if (!dcId_priority.count(it.dcId) || dcId_priority[it.dcId] != it.priority) {
+                                return ConfigurationResult::REGIONS_CHANGED;
+                            }
+                        }
+
+                        // must only have one region with priority >= 0
+                        int activeRegionCount = 0;
+                        for (auto& it : newConfig.regions) {
+                            if (it.priority >= 0) {
+                                activeRegionCount++;
+                            }
+                        }
+                        if (activeRegionCount > 1) {
+                            return ConfigurationResult::MULTIPLE_ACTIVE_REGIONS;
+                        }
+                    }
+
+                    state typename DB::TransactionT::template FutureT<RangeResult> fServerListF =
+                        tr->getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY);
+                    state Future<RangeResult> fServerList =
+                        (newConfig.regions.size()) ? safeThreadFutureToFuture(fServerListF) : Future<RangeResult>();
+
+                    if (newConfig.usableRegions == 2) {
+                        if (oldReplicationUsesDcId) {
+                            state typename DB::TransactionT::template FutureT<RangeResult> fLocalityListF =
+                                tr->getRange(tagLocalityListKeys, CLIENT_KNOBS->TOO_MANY);
+                            state Future<RangeResult> fLocalityList = safeThreadFutureToFuture(fLocalityListF);
+                            wait(success(fLocalityList) || tooLong);
+                            if (!fLocalityList.isReady()) {
+                                return ConfigurationResult::DATABASE_UNAVAILABLE;
+                            }
+                            RangeResult localityList = fLocalityList.get();
+                            ASSERT(!localityList.more && localityList.size() < CLIENT_KNOBS->TOO_MANY);
+
+                            std::set<Key> localityDcIds;
+                            for (auto& s : localityList) {
+                                auto dc = decodeTagLocalityListKey(s.key);
+                                if (dc.present()) {
+                                    localityDcIds.insert(dc.get());
+                                }
+                            }
+
+                            for (auto& it : newConfig.regions) {
+                                if (localityDcIds.count(it.dcId) == 0) {
+                                    return ConfigurationResult::DCID_MISSING;
+                                }
+                            }
+                        } else {
+                            // all regions with priority >= 0 must be fully replicated
+                            state std::vector<typename DB::TransactionT::template FutureT<Optional<Value>>>
+                                replicasFuturesF;
+                            state std::vector<Future<Optional<Value>>> replicasFutures;
+                            for (auto& it : newConfig.regions) {
+                                if (it.priority >= 0) {
+                                    replicasFuturesF.push_back(tr->get(datacenterReplicasKeyFor(it.dcId)));
+                                    replicasFutures.push_back(safeThreadFutureToFuture(replicasFuturesF.back()));
+                                }
+                            }
+                            wait(waitForAll(replicasFutures) || tooLong);
+
+                            for (auto& it : replicasFutures) {
+                                if (!it.isReady()) {
+                                    return ConfigurationResult::DATABASE_UNAVAILABLE;
+                                }
+                                if (!it.get().present()) {
+                                    return ConfigurationResult::REGION_NOT_FULLY_REPLICATED;
+                                }
+                            }
+                        }
+                    }
+
+                    if (newConfig.regions.size()) {
+                        // all storage servers must be in one of the regions
+                        wait(success(fServerList) || tooLong);
+                        if (!fServerList.isReady()) {
+                            return ConfigurationResult::DATABASE_UNAVAILABLE;
+                        }
+                        RangeResult serverList = fServerList.get();
+                        ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY);
+
+                        std::set<Key> newDcIds;
+                        for (auto& it : newConfig.regions) {
+                            newDcIds.insert(it.dcId);
+                        }
+                        std::set<Optional<Key>> missingDcIds;
+                        for (auto& s : serverList) {
+                            auto ssi = decodeServerListValue(s.value);
+                            if (!ssi.locality.dcId().present() || !newDcIds.count(ssi.locality.dcId().get())) {
+                                missingDcIds.insert(ssi.locality.dcId());
+                            }
+                        }
+                        if (missingDcIds.size() > (oldReplicationUsesDcId ? 1 : 0)) {
+                            return ConfigurationResult::STORAGE_IN_UNKNOWN_DCID;
+                        }
+                    }
+
+                    wait(success(fWorkers) || tooLong);
+                    if (!fWorkers.isReady()) {
+                        return ConfigurationResult::DATABASE_UNAVAILABLE;
+                    }
+
+                    if (newConfig.regions.size()) {
+                        std::map<Optional<Key>, std::set<Optional<Key>>> dcId_zoneIds;
+                        for (auto& it : fWorkers.get()) {
+                            if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
+                                dcId_zoneIds[it.locality.dcId()].insert(it.locality.zoneId());
+                            }
+                        }
+                        for (auto& region : newConfig.regions) {
+                            if (dcId_zoneIds[region.dcId].size() <
+                                std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
+                                return ConfigurationResult::NOT_ENOUGH_WORKERS;
+                            }
+                            if (region.satelliteTLogReplicationFactor > 0 && region.priority >= 0) {
+                                int totalSatelliteProcesses = 0;
+                                for (auto& sat : region.satellites) {
+                                    totalSatelliteProcesses += dcId_zoneIds[sat.dcId].size();
+                                }
+                                if (totalSatelliteProcesses < region.satelliteTLogReplicationFactor) {
+                                    return ConfigurationResult::NOT_ENOUGH_WORKERS;
+                                }
+                            }
+                        }
+                    } else {
+                        std::set<Optional<Key>> zoneIds;
+                        for (auto& it : fWorkers.get()) {
+                            if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
+                                zoneIds.insert(it.locality.zoneId());
+                            }
+                        }
+                        if (zoneIds.size() < std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
+                            return ConfigurationResult::NOT_ENOUGH_WORKERS;
+                        }
+                    }
+
+                    if (newConfig.storageServerStoreType != oldConfig.storageServerStoreType &&
+                        newConfig.storageMigrationType == StorageMigrationType::DISABLED) {
+                        return ConfigurationResult::STORAGE_MIGRATION_DISABLED;
+                    } else if (newConfig.storageMigrationType == StorageMigrationType::GRADUAL &&
+                               newConfig.perpetualStorageWiggleSpeed == 0) {
+                        warnPPWGradual = true;
+                    }
+                }
+            }
+            if (creating) {
+                tr->setOption(FDBTransactionOptions::INITIALIZE_NEW_DATABASE);
+                tr->addReadConflictRange(singleKeyRange(initIdKey));
+            } else if (m.size()) {
+                // might be used in an emergency transaction, so make sure it is retry-self-conflicting and
+                // CAUSAL_WRITE_RISKY
+                tr->setOption(FDBTransactionOptions::CAUSAL_WRITE_RISKY);
+                tr->addReadConflictRange(singleKeyRange(m.begin()->first));
+            }
+
+            if (locked.present()) {
+                ASSERT(creating);
+                tr->atomicOp(databaseLockedKey,
+                             BinaryWriter::toValue(locked.get(), Unversioned())
+                                 .withPrefix(LiteralStringRef("0123456789"))
+                                 .withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
+                             MutationRef::SetVersionstampedValue);
+            }
+
+            for (auto i = m.begin(); i != m.end(); ++i) {
+                tr->set(StringRef(i->first), StringRef(i->second));
+            }
+
+            tr->addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
+            tr->set(moveKeysLockOwnerKey, versionKey);
+
+            wait(safeThreadFutureToFuture(tr->commit()));
+            break;
+        } catch (Error& e) {
+            state Error e1(e);
+            if ((e.code() == error_code_not_committed || e.code() == error_code_transaction_too_old) && creating) {
+                // The database now exists. Determine whether we created it or it was already existing/created by
+                // someone else. The latter is an error.
+                tr->reset();
+                loop {
+                    try {
+                        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
+                        tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
+                        tr->setOption(FDBTransactionOptions::LOCK_AWARE);
+                        tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
+
+                        state typename DB::TransactionT::template FutureT<Optional<Value>> vF = tr->get(initIdKey);
+                        Optional<Value> v = wait(safeThreadFutureToFuture(vF));
+                        if (v != m[initIdKey.toString()])
+                            return ConfigurationResult::DATABASE_ALREADY_CREATED;
+                        else
+                            return ConfigurationResult::DATABASE_CREATED;
+                    } catch (Error& e2) {
+                        wait(safeThreadFutureToFuture(tr->onError(e2)));
+                    }
+                }
+            }
+            wait(safeThreadFutureToFuture(tr->onError(e1)));
+        }
+    }
+
+    if (warnPPWGradual) {
+        return ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL;
+    } else {
+        return ConfigurationResult::SUCCESS;
+    }
+}
+
+ACTOR template <class DB>
+Future<ConfigurationResult> autoConfig(Reference<DB> db, ConfigureAutoResult conf) {
+    state Reference<typename DB::TransactionT> tr = db->createTransaction();
+    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
+
+    if (!conf.address_class.size())
+        return ConfigurationResult::INCOMPLETE_CONFIGURATION; // FIXME: correct return type
+
+    loop {
+        try {
+            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
+            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
+            tr->setOption(FDBTransactionOptions::LOCK_AWARE);
+            tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
+
+            state typename DB::TransactionT::template FutureT<RangeResult> processClassesF;
+            state typename DB::TransactionT::template FutureT<RangeResult> processDataF;
+            std::vector<ProcessData> workers = wait(getWorkers(tr, processClassesF, processDataF));
+            std::map<NetworkAddress, Optional<Standalone<StringRef>>> address_processId;
+            for (auto& w : workers) {
+                address_processId[w.address] = w.locality.processId();
+            }
+
+            for (auto& it : conf.address_class) {
+                if (it.second.classSource() == ProcessClass::CommandLineSource) {
+                    tr->clear(processClassKeyFor(address_processId[it.first].get()));
+                } else {
+                    tr->set(processClassKeyFor(address_processId[it.first].get()), processClassValue(it.second));
+                }
+            }
+
+            if (conf.address_class.size())
+                tr->set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
+
+            if (conf.auto_logs != conf.old_logs)
+                tr->set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
|
||||||
|
|
||||||
|
if (conf.auto_commit_proxies != conf.old_commit_proxies)
|
||||||
|
tr->set(configKeysPrefix.toString() + "auto_commit_proxies", format("%d", conf.auto_commit_proxies));
|
||||||
|
|
||||||
|
if (conf.auto_grv_proxies != conf.old_grv_proxies)
|
||||||
|
tr->set(configKeysPrefix.toString() + "auto_grv_proxies", format("%d", conf.auto_grv_proxies));
|
||||||
|
|
||||||
|
if (conf.auto_resolvers != conf.old_resolvers)
|
||||||
|
tr->set(configKeysPrefix.toString() + "auto_resolvers", format("%d", conf.auto_resolvers));
|
||||||
|
|
||||||
|
if (conf.auto_replication != conf.old_replication) {
|
||||||
|
std::vector<StringRef> modes;
|
||||||
|
modes.push_back(conf.auto_replication);
|
||||||
|
std::map<std::string, std::string> m;
|
||||||
|
auto r = buildConfiguration(modes, m);
|
||||||
|
if (r != ConfigurationResult::SUCCESS)
|
||||||
|
return r;
|
||||||
|
|
||||||
|
for (auto& kv : m)
|
||||||
|
tr->set(kv.first, kv.second);
|
||||||
|
}
|
||||||
|
|
||||||
|
tr->addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
|
||||||
|
tr->set(moveKeysLockOwnerKey, versionKey);
|
||||||
|
|
||||||
|
wait(safeThreadFutureToFuture(tr->commit()));
|
||||||
|
return ConfigurationResult::SUCCESS;
|
||||||
|
} catch (Error& e) {
|
||||||
|
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accepts tokens separated by spaces in a single string
|
||||||
|
template <class DB>
|
||||||
|
Future<ConfigurationResult> changeConfig(Reference<DB> db, std::string const& modes, bool force) {
|
||||||
|
TraceEvent("ChangeConfig").detail("Mode", modes);
|
||||||
|
std::map<std::string, std::string> m;
|
||||||
|
auto r = buildConfiguration(modes, m);
|
||||||
|
if (r != ConfigurationResult::SUCCESS)
|
||||||
|
return r;
|
||||||
|
return changeConfig(db, m, force);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accepts a vector of configuration tokens
|
||||||
|
template <class DB>
|
||||||
|
Future<ConfigurationResult> changeConfig(Reference<DB> db,
|
||||||
|
std::vector<StringRef> const& modes,
|
||||||
|
Optional<ConfigureAutoResult> const& conf,
|
||||||
|
bool force) {
|
||||||
|
if (modes.size() && modes[0] == LiteralStringRef("auto") && conf.present()) {
|
||||||
|
return autoConfig(db, conf.get());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::map<std::string, std::string> m;
|
||||||
|
auto r = buildConfiguration(modes, m);
|
||||||
|
if (r != ConfigurationResult::SUCCESS)
|
||||||
|
return r;
|
||||||
|
return changeConfig(db, m, force);
|
||||||
|
}
|
||||||
|
|
||||||
|
// return the corresponding error message for the CoordinatorsResult
|
||||||
|
// used by special keys and fdbcli
|
||||||
|
std::string generateErrorMessage(const CoordinatorsResult& res);
|
||||||
|
|
||||||
|
} // namespace ManagementAPI
|
||||||
|
|
||||||
#include "flow/unactorcompiler.h"
|
#include "flow/unactorcompiler.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
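A short usage sketch of the templates above may help orient readers; it is illustrative only (the calling ACTOR and the `db` handle are assumed, and "triple ssd" is just a representative fdbcli-style mode string):

    // Illustrative only: apply a new replication/storage configuration from inside an ACTOR.
    // `db` is assumed to be a Reference<DB> exposing createTransaction(), as required above.
    ConfigurationResult res = wait(ManagementAPI::changeConfig(db, "triple ssd", /*force=*/false));
    if (res == ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL) {
        // Per the checks above: gradual storage migration was requested while the
        // perpetual storage wiggle is off, so the migration would not make progress.
        printf("warning: enable perpetual_storage_wiggle for gradual migration\n");
    }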
@@ -328,7 +328,7 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/fuzz") {
 	return Void();
 }
 
-ClusterConnectionString::ClusterConnectionString(vector<NetworkAddress> servers, Key key) : coord(servers) {
+ClusterConnectionString::ClusterConnectionString(std::vector<NetworkAddress> servers, Key key) : coord(servers) {
 	parseKey(key.toString());
 }
 
@@ -383,9 +383,9 @@ ClientCoordinators::ClientCoordinators(Key clusterKey, std::vector<NetworkAddres
 }
 
 ClientLeaderRegInterface::ClientLeaderRegInterface(NetworkAddress remote)
-  : getLeader(Endpoint({ remote }, WLTOKEN_CLIENTLEADERREG_GETLEADER)),
-    openDatabase(Endpoint({ remote }, WLTOKEN_CLIENTLEADERREG_OPENDATABASE)),
-    checkDescriptorMutable(Endpoint({ remote }, WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE)) {}
+  : getLeader(Endpoint::wellKnown({ remote }, WLTOKEN_CLIENTLEADERREG_GETLEADER)),
+    openDatabase(Endpoint::wellKnown({ remote }, WLTOKEN_CLIENTLEADERREG_OPENDATABASE)),
+    checkDescriptorMutable(Endpoint::wellKnown({ remote }, WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE)) {}
 
 ClientLeaderRegInterface::ClientLeaderRegInterface(INetwork* local) {
 	getLeader.makeWellKnownEndpoint(WLTOKEN_CLIENTLEADERREG_GETLEADER, TaskPriority::Coordination);
@@ -394,9 +394,8 @@ ClientLeaderRegInterface::ClientLeaderRegInterface(INetwork* local) {
 	                                      TaskPriority::Coordination);
 }
 
-// Nominee is the worker among all workers that are considered as leader by a coordinator
-// This function contacts a coordinator coord to ask if the worker is considered as a leader (i.e., if the worker
-// is a nominee)
+// Nominee is the worker among all workers that are considered as leader by one coordinator
+// This function contacts a coordinator coord to ask who its nominee is.
 ACTOR Future<Void> monitorNominee(Key key,
                                   ClientLeaderRegInterface coord,
                                   AsyncTrigger* nomineeChange,
@@ -428,13 +427,13 @@ ACTOR Future<Void> monitorNominee(Key key,
 // Also used in fdbserver/LeaderElection.actor.cpp!
 // bool represents if the LeaderInfo is a majority answer or not.
 // This function also masks the first 7 bits of changeId of the nominees and returns the Leader with masked changeId
-Optional<std::pair<LeaderInfo, bool>> getLeader(const vector<Optional<LeaderInfo>>& nominees) {
+Optional<std::pair<LeaderInfo, bool>> getLeader(const std::vector<Optional<LeaderInfo>>& nominees) {
 	// If any coordinator says that the quorum is forwarded, then it is
 	for (int i = 0; i < nominees.size(); i++)
 		if (nominees[i].present() && nominees[i].get().forward)
 			return std::pair<LeaderInfo, bool>(nominees[i].get(), true);
 
-	vector<std::pair<UID, int>> maskedNominees;
+	std::vector<std::pair<UID, int>> maskedNominees;
 	maskedNominees.reserve(nominees.size());
 	for (int i = 0; i < nominees.size(); i++) {
 		if (nominees[i].present()) {
@@ -529,18 +528,6 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration(Reference<ClusterConn
 	}
 }
 
-Future<Void> monitorLeaderRemotelyInternal(Reference<ClusterConnectionFile> const& connFile,
-                                           Reference<AsyncVar<Value>> const& outSerializedLeaderInfo);
-
-template <class LeaderInterface>
-Future<Void> monitorLeaderRemotely(Reference<ClusterConnectionFile> const& connFile,
-                                   Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
-	LeaderDeserializer<LeaderInterface> deserializer;
-	auto serializedInfo = makeReference<AsyncVar<Value>>();
-	Future<Void> m = monitorLeaderRemotelyInternal(connFile, serializedInfo);
-	return m || deserializer(serializedInfo, outKnownLeader);
-}
-
 ACTOR Future<Void> monitorLeaderInternal(Reference<ClusterConnectionFile> connFile,
                                          Reference<AsyncVar<Value>> outSerializedLeaderInfo) {
 	state MonitorLeaderInfo info(connFile);
@@ -656,7 +643,7 @@ ACTOR Future<Void> getClientInfoFromLeader(Reference<AsyncVar<Optional<ClusterCo
 		choose {
 			when(ClientDBInfo ni =
 			         wait(brokenPromiseToNever(knownLeader->get().get().clientInterface.openDatabase.getReply(req)))) {
-				TraceEvent("MonitorLeaderForProxiesGotClientInfo", knownLeader->get().get().clientInterface.id())
+				TraceEvent("GetClientInfoFromLeaderGotClientInfo", knownLeader->get().get().clientInterface.id())
 				    .detail("CommitProxy0", ni.commitProxies.size() ? ni.commitProxies[0].id() : UID())
 				    .detail("GrvProxy0", ni.grvProxies.size() ? ni.grvProxies[0].id() : UID())
 				    .detail("ClientID", ni.id);
@@ -667,11 +654,11 @@ ACTOR Future<Void> getClientInfoFromLeader(Reference<AsyncVar<Optional<ClusterCo
 	}
 }
 
-ACTOR Future<Void> monitorLeaderForProxies(Key clusterKey,
-                                           vector<NetworkAddress> coordinators,
+ACTOR Future<Void> monitorLeaderAndGetClientInfo(Key clusterKey,
+                                                 std::vector<NetworkAddress> coordinators,
                                            ClientData* clientData,
                                            Reference<AsyncVar<Optional<LeaderInfo>>> leaderInfo) {
-	state vector<ClientLeaderRegInterface> clientLeaderServers;
+	state std::vector<ClientLeaderRegInterface> clientLeaderServers;
 	state AsyncTrigger nomineeChange;
 	state std::vector<Optional<LeaderInfo>> nominees;
 	state Future<Void> allActors;
@@ -695,7 +682,7 @@ ACTOR Future<Void> monitorLeaderForProxies(Key clusterKey,
 
 	loop {
 		Optional<std::pair<LeaderInfo, bool>> leader = getLeader(nominees);
-		TraceEvent("MonitorLeaderForProxiesChange")
+		TraceEvent("MonitorLeaderAndGetClientInfoLeaderChange")
 		    .detail("NewLeader", leader.present() ? leader.get().first.changeID : UID(1, 1))
 		    .detail("Key", clusterKey.printable());
 		if (leader.present()) {
@@ -705,7 +692,7 @@ ACTOR Future<Void> monitorLeaderForProxies(Key clusterKey,
 			outInfo.forward = leader.get().first.serializedInfo;
 			clientData->clientInfo->set(CachedSerialization<ClientDBInfo>(outInfo));
 			leaderInfo->set(leader.get().first);
-			TraceEvent("MonitorLeaderForProxiesForwarding")
+			TraceEvent("MonitorLeaderAndGetClientInfoForwarding")
 			    .detail("NewConnStr", leader.get().first.serializedInfo.toString());
 			return Void();
 		}
@@ -762,7 +749,6 @@ void shrinkProxyList(ClientDBInfo& ni,
 	}
 }
 
-// Leader is the process that will be elected by coordinators as the cluster controller
 ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
     Reference<ClusterConnectionFile> connFile,
     Reference<AsyncVar<ClientDBInfo>> clientInfo,
@@ -771,9 +757,9 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
     Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions,
     Key traceLogGroup) {
 	state ClusterConnectionString cs = info.intermediateConnFile->getConnectionString();
-	state vector<NetworkAddress> addrs = cs.coordinators();
+	state std::vector<NetworkAddress> addrs = cs.coordinators();
 	state int idx = 0;
-	state int successIdx = 0;
+	state int successIndex = 0;
 	state Optional<double> incorrectTime;
 	state std::vector<UID> lastCommitProxyUIDs;
 	state std::vector<CommitProxyInterface> lastCommitProxies;
@@ -840,11 +826,11 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
 			auto& ni = rep.get().mutate();
 			shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
 			clientInfo->set(ni);
-			successIdx = idx;
+			successIndex = idx;
 		} else {
-			TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator cannot talk to cluster controller
+			TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator can't talk to cluster controller
 			idx = (idx + 1) % addrs.size();
-			if (idx == successIdx) {
+			if (idx == successIndex) {
 				wait(delay(CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY));
 			}
 		}
@@ -61,17 +61,23 @@ struct MonitorLeaderInfo {
 	  : hasConnected(false), intermediateConnFile(intermediateConnFile) {}
 };
 
-// Monitors the given coordination group's leader election process and provides a best current guess
-// of the current leader. If a leader is elected for long enough and communication with a quorum of
-// coordinators is possible, eventually outKnownLeader will be that leader's interface.
+Optional<std::pair<LeaderInfo, bool>> getLeader(const std::vector<Optional<LeaderInfo>>& nominees);
+
+// This is one place where the leader election algorithm is run. The client contacts all coordinators to collect
+// nominees; the nominee with the most nominations is the leader. This function also monitors changes of the leader.
+// If a leader is elected for long enough and communication with a quorum of coordinators is possible, eventually
+// outKnownLeader will be that leader's interface.
 template <class LeaderInterface>
 Future<Void> monitorLeader(Reference<ClusterConnectionFile> const& connFile,
                            Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader);
 
-Future<Void> monitorLeaderForProxies(Value const& key,
-                                     vector<NetworkAddress> const& coordinators,
-                                     ClientData* const& clientData,
-                                     Reference<AsyncVar<Optional<LeaderInfo>>> const& leaderInfo);
+// This is one place where the leader election algorithm is run. The client contacts all coordinators to collect
+// nominees; the nominee with the most nominations is the leader, and client data is then collected from the leader.
+// This function also monitors changes of the leader.
+Future<Void> monitorLeaderAndGetClientInfo(Value const& key,
+                                           std::vector<NetworkAddress> const& coordinators,
+                                           ClientData* const& clientData,
+                                           Reference<AsyncVar<Optional<LeaderInfo>>> const& leaderInfo);
 
 Future<Void> monitorProxies(
     Reference<AsyncVar<Reference<ClusterConnectionFile>>> const& connFile,
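The "most nominations wins" rule described in the new comments is easy to misread, so here is a standalone sketch of the counting that getLeader() performs after masking the first (highest) 7 bits of each nominee's changeId. This is an illustration only, not FDB source: UIDs are simplified to plain 64-bit integers, and the majority threshold mirrors the "is this a majority answer" flag.

    #include <cstdint>
    #include <map>
    #include <optional>
    #include <utility>
    #include <vector>

    // Illustration: reduce each nominee to an id with the top 7 bits masked off,
    // then pick the id reported by the most coordinators. The bool says whether
    // that id was reported by a strict majority of all coordinators.
    std::optional<std::pair<uint64_t, bool>> pickLeader(const std::vector<std::optional<uint64_t>>& nominees) {
        std::map<uint64_t, int> votes;
        for (const auto& n : nominees)
            if (n)
                votes[*n & ~(uint64_t(0xFE) << 56)]++; // clear bits 63..57, the "first 7 bits"

        std::optional<std::pair<uint64_t, bool>> best;
        for (const auto& [id, count] : votes)
            if (!best || count > votes.at(best->first))
                best = { id, count > int(nominees.size() / 2) };
        return best;
    }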
@@ -606,7 +606,7 @@ void DLApi::addNetworkThreadCompletionHook(void (*hook)(void*), void* hookParame
 // MultiVersionTransaction
 MultiVersionTransaction::MultiVersionTransaction(Reference<MultiVersionDatabase> db,
                                                  UniqueOrderedOptionList<FDBTransactionOptions> defaultOptions)
-  : db(db) {
+  : db(db), startTime(timer_monotonic()), timeoutTsav(new ThreadSingleAssignmentVar<Void>()) {
 	setDefaultOptions(defaultOptions);
 	updateTransaction();
 }
@@ -622,20 +622,23 @@ void MultiVersionTransaction::updateTransaction() {
 	TransactionInfo newTr;
 	if (currentDb.value) {
 		newTr.transaction = currentDb.value->createTransaction();
+	}
 
 	Optional<StringRef> timeout;
 	for (auto option : persistentOptions) {
 		if (option.first == FDBTransactionOptions::TIMEOUT) {
 			timeout = option.second.castTo<StringRef>();
-		} else {
+		} else if (currentDb.value) {
 			newTr.transaction->setOption(option.first, option.second.castTo<StringRef>());
-		}
 		}
+	}
 
 	// Setting a timeout can immediately cause a transaction to fail. The only timeout
 	// that matters is the one most recently set, so we ignore any earlier set timeouts
 	// that might inadvertently fail the transaction.
 	if (timeout.present()) {
+		setTimeout(timeout);
+		if (currentDb.value) {
 			newTr.transaction->setOption(FDBTransactionOptions::TIMEOUT, timeout);
 		}
 	}
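For context on the option values flowing through this hunk: integer option parameters such as TIMEOUT are passed to setOption() as 8-byte little-endian byte strings, which is also what the environment-variable hunk further down assumes when it packs an int64_t into a StringRef. A caller-side sketch (illustrative only; `tr` is assumed to be a transaction from the surrounding code, on a little-endian host):

    // Sketch only: encode a 5-second TIMEOUT as the 8-byte blob setOption() expects
    // for integer parameters (little-endian host assumed).
    int64_t timeoutMillis = 5000;
    tr->setOption(FDBTransactionOptions::TIMEOUT,
                  StringRef(reinterpret_cast<const uint8_t*>(&timeoutMillis), sizeof(timeoutMillis)));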
@@ -670,19 +673,19 @@ void MultiVersionTransaction::setVersion(Version v) {
 }
 ThreadFuture<Version> MultiVersionTransaction::getReadVersion() {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getReadVersion() : ThreadFuture<Version>(Never());
+	auto f = tr.transaction ? tr.transaction->getReadVersion() : makeTimeout<Version>();
 	return abortableFuture(f, tr.onChange);
 }
 
 ThreadFuture<Optional<Value>> MultiVersionTransaction::get(const KeyRef& key, bool snapshot) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->get(key, snapshot) : ThreadFuture<Optional<Value>>(Never());
+	auto f = tr.transaction ? tr.transaction->get(key, snapshot) : makeTimeout<Optional<Value>>();
 	return abortableFuture(f, tr.onChange);
 }
 
 ThreadFuture<Key> MultiVersionTransaction::getKey(const KeySelectorRef& key, bool snapshot) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getKey(key, snapshot) : ThreadFuture<Key>(Never());
+	auto f = tr.transaction ? tr.transaction->getKey(key, snapshot) : makeTimeout<Key>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -692,8 +695,8 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeySelectorRef
 	                                                        bool snapshot,
 	                                                        bool reverse) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getRange(begin, end, limit, snapshot, reverse)
-	                        : ThreadFuture<RangeResult>(Never());
+	auto f =
+	    tr.transaction ? tr.transaction->getRange(begin, end, limit, snapshot, reverse) : makeTimeout<RangeResult>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -703,8 +706,8 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeySelectorRef
 	                                                        bool snapshot,
 	                                                        bool reverse) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getRange(begin, end, limits, snapshot, reverse)
-	                        : ThreadFuture<RangeResult>(Never());
+	auto f =
+	    tr.transaction ? tr.transaction->getRange(begin, end, limits, snapshot, reverse) : makeTimeout<RangeResult>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -713,8 +716,7 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeyRangeRef& k
 	                                                        bool snapshot,
 	                                                        bool reverse) {
 	auto tr = getTransaction();
-	auto f =
-	    tr.transaction ? tr.transaction->getRange(keys, limit, snapshot, reverse) : ThreadFuture<RangeResult>(Never());
+	auto f = tr.transaction ? tr.transaction->getRange(keys, limit, snapshot, reverse) : makeTimeout<RangeResult>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -723,21 +725,20 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeyRangeRef& k
 	                                                        bool snapshot,
 	                                                        bool reverse) {
 	auto tr = getTransaction();
-	auto f =
-	    tr.transaction ? tr.transaction->getRange(keys, limits, snapshot, reverse) : ThreadFuture<RangeResult>(Never());
+	auto f = tr.transaction ? tr.transaction->getRange(keys, limits, snapshot, reverse) : makeTimeout<RangeResult>();
 	return abortableFuture(f, tr.onChange);
 }
 
 ThreadFuture<Standalone<StringRef>> MultiVersionTransaction::getVersionstamp() {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getVersionstamp() : ThreadFuture<Standalone<StringRef>>(Never());
+	auto f = tr.transaction ? tr.transaction->getVersionstamp() : makeTimeout<Standalone<StringRef>>();
 	return abortableFuture(f, tr.onChange);
 }
 
 ThreadFuture<Standalone<VectorRef<const char*>>> MultiVersionTransaction::getAddressesForKey(const KeyRef& key) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getAddressesForKey(key)
-	                        : ThreadFuture<Standalone<VectorRef<const char*>>>(Never());
+	auto f =
+	    tr.transaction ? tr.transaction->getAddressesForKey(key) : makeTimeout<Standalone<VectorRef<const char*>>>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -750,7 +751,7 @@ void MultiVersionTransaction::addReadConflictRange(const KeyRangeRef& keys) {
 
 ThreadFuture<int64_t> MultiVersionTransaction::getEstimatedRangeSizeBytes(const KeyRangeRef& keys) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getEstimatedRangeSizeBytes(keys) : ThreadFuture<int64_t>(Never());
+	auto f = tr.transaction ? tr.transaction->getEstimatedRangeSizeBytes(keys) : makeTimeout<int64_t>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -758,7 +759,7 @@ ThreadFuture<Standalone<VectorRef<KeyRef>>> MultiVersionTransaction::getRangeSpl
 	                                                                                 int64_t chunkSize) {
 	auto tr = getTransaction();
 	auto f = tr.transaction ? tr.transaction->getRangeSplitPoints(range, chunkSize)
-	                        : ThreadFuture<Standalone<VectorRef<KeyRef>>>(Never());
+	                        : makeTimeout<Standalone<VectorRef<KeyRef>>>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -799,7 +800,7 @@ void MultiVersionTransaction::clear(const KeyRef& key) {
 
 ThreadFuture<Void> MultiVersionTransaction::watch(const KeyRef& key) {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->watch(key) : ThreadFuture<Void>(Never());
+	auto f = tr.transaction ? tr.transaction->watch(key) : makeTimeout<Void>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -812,7 +813,7 @@ void MultiVersionTransaction::addWriteConflictRange(const KeyRangeRef& keys) {
 
 ThreadFuture<Void> MultiVersionTransaction::commit() {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->commit() : ThreadFuture<Void>(Never());
+	auto f = tr.transaction ? tr.transaction->commit() : makeTimeout<Void>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -827,7 +828,7 @@ Version MultiVersionTransaction::getCommittedVersion() {
 
 ThreadFuture<int64_t> MultiVersionTransaction::getApproximateSize() {
 	auto tr = getTransaction();
-	auto f = tr.transaction ? tr.transaction->getApproximateSize() : ThreadFuture<int64_t>(Never());
+	auto f = tr.transaction ? tr.transaction->getApproximateSize() : makeTimeout<int64_t>();
 	return abortableFuture(f, tr.onChange);
 }
 
@@ -841,6 +842,11 @@ void MultiVersionTransaction::setOption(FDBTransactionOptions::Option option, Op
 	if (MultiVersionApi::apiVersionAtLeast(610) && itr->second.persistent) {
 		persistentOptions.emplace_back(option, value.castTo<Standalone<StringRef>>());
 	}
 
+	if (itr->first == FDBTransactionOptions::TIMEOUT) {
+		setTimeout(value);
+	}
+
 	auto tr = getTransaction();
 	if (tr.transaction) {
 		tr.transaction->setOption(option, value);
@@ -853,7 +859,7 @@ ThreadFuture<Void> MultiVersionTransaction::onError(Error const& e) {
 		return ThreadFuture<Void>(Void());
 	} else {
 		auto tr = getTransaction();
-		auto f = tr.transaction ? tr.transaction->onError(e) : ThreadFuture<Void>(Never());
+		auto f = tr.transaction ? tr.transaction->onError(e) : makeTimeout<Void>();
 		f = abortableFuture(f, tr.onChange);
 
 		return flatMapThreadFuture<Void, Void>(f, [this, e](ErrorOr<Void> ready) {
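The repeated Never() -> makeTimeout<T>() substitution above is the core behavioral change of this commit: an operation issued while no underlying database object is connected used to return a future that never fired, and now fails with transaction_timed_out once the configured deadline passes. A standalone analogue of the pattern (illustration only, plain C++ threads instead of flow):

    #include <chrono>
    #include <future>
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    // Instead of handing back a future that blocks forever when no backend exists,
    // hand back one that fails after a deadline (analogue of makeTimeout<T>()).
    std::future<int> makeTimeoutFuture(std::chrono::milliseconds deadline) {
        return std::async(std::launch::async, [deadline]() -> int {
            std::this_thread::sleep_for(deadline);
            throw std::runtime_error("transaction_timed_out"); // stand-in for the FDB error
        });
    }

    int main() {
        auto f = makeTimeoutFuture(std::chrono::milliseconds(100));
        try {
            f.get(); // a real client would race this against the actual operation
        } catch (const std::exception& e) {
            std::cout << e.what() << "\n"; // prints transaction_timed_out
        }
    }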
@@ -871,12 +877,100 @@ ThreadFuture<Void> MultiVersionTransaction::onError(Error const& e) {
 	}
 }
 
+// Waits for the specified duration and signals the assignment variable with a timed out error
+// This will be canceled if a new timeout is set, in which case the tsav will not be signaled.
+ACTOR Future<Void> timeoutImpl(Reference<ThreadSingleAssignmentVar<Void>> tsav, double duration) {
+	wait(delay(duration));
+
+	tsav->trySendError(transaction_timed_out());
+	return Void();
+}
+
+// Configure a timeout based on the options set for this transaction. This timeout only applies
+// if we don't have an underlying database object to connect with.
+void MultiVersionTransaction::setTimeout(Optional<StringRef> value) {
+	double timeoutDuration = extractIntOption(value, 0, std::numeric_limits<int>::max()) / 1000.0;
+
+	ThreadFuture<Void> prevTimeout;
+	double transactionStartTime = startTime;
+
+	{ // lock scope
+		ThreadSpinLockHolder holder(timeoutLock);
+
+		Reference<ThreadSingleAssignmentVar<Void>> tsav = timeoutTsav;
+		ThreadFuture<Void> newTimeout = onMainThread([transactionStartTime, tsav, timeoutDuration]() {
+			return timeoutImpl(tsav, timeoutDuration - std::max(0.0, now() - transactionStartTime));
+		});
+
+		prevTimeout = currentTimeout;
+		currentTimeout = newTimeout;
+	}
+
+	// Cancel the previous timeout now that we have a new one. This means that changing the timeout
+	// affects in-flight operations, which is consistent with the behavior in RYW.
+	if (prevTimeout.isValid()) {
+		prevTimeout.cancel();
+	}
+}
+
+// Creates a ThreadFuture<T> that will signal an error if the transaction times out.
+template <class T>
+ThreadFuture<T> MultiVersionTransaction::makeTimeout() {
+	ThreadFuture<Void> f;
+
+	{ // lock scope
+		ThreadSpinLockHolder holder(timeoutLock);
+
+		// Our ThreadFuture holds a reference to this TSAV,
+		// but the ThreadFuture does not increment the ref count
+		timeoutTsav->addref();
+		f = ThreadFuture<Void>(timeoutTsav.getPtr());
+	}
+
+	// When our timeoutTsav gets set, map it to the appropriate type
+	return mapThreadFuture<Void, T>(f, [](ErrorOr<Void> v) {
+		ASSERT(v.isError());
+		return ErrorOr<T>(v.getError());
+	});
+}
+
 void MultiVersionTransaction::reset() {
 	persistentOptions.clear();
+
+	// Reset the timeout state
+	Reference<ThreadSingleAssignmentVar<Void>> prevTimeoutTsav;
+	ThreadFuture<Void> prevTimeout;
+	startTime = timer_monotonic();
+
+	{ // lock scope
+		ThreadSpinLockHolder holder(timeoutLock);
+
+		prevTimeoutTsav = timeoutTsav;
+		timeoutTsav = makeReference<ThreadSingleAssignmentVar<Void>>();
+
+		prevTimeout = currentTimeout;
+		currentTimeout = ThreadFuture<Void>();
+	}
+
+	// Cancel any outstanding operations if they don't have an underlying transaction object to cancel them
+	prevTimeoutTsav->trySendError(transaction_cancelled());
+	if (prevTimeout.isValid()) {
+		prevTimeout.cancel();
+	}
+
 	setDefaultOptions(db->dbState->transactionDefaultOptions);
 	updateTransaction();
 }
 
+MultiVersionTransaction::~MultiVersionTransaction() {
+	timeoutTsav->trySendError(transaction_cancelled());
+}
+
+bool MultiVersionTransaction::isValid() {
+	auto tr = getTransaction();
+	return tr.transaction.isValid();
+}
+
 // MultiVersionDatabase
 MultiVersionDatabase::MultiVersionDatabase(MultiVersionApi* api,
                                            int threadIdx,
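One detail worth noting in setTimeout() above: the delay handed to timeoutImpl() is the time remaining, not the raw option value, because the timeout may be (re)set long after the transaction started. A worked instance of that arithmetic, with hypothetical numbers:

    #include <algorithm>
    #include <iostream>

    int main() {
        // Hypothetical numbers: a 5000 ms TIMEOUT option set 1.2 s after the transaction began.
        double timeoutDuration = 5000 / 1000.0; // seconds, as extractIntOption() produces above
        double transactionStartTime = 100.0;    // startTime, captured via timer_monotonic()
        double currentTime = 101.2;             // what now() returns when setTimeout() runs
        double remaining = timeoutDuration - std::max(0.0, currentTime - transactionStartTime);
        std::cout << remaining << "\n"; // 3.8: timeoutImpl() fires 3.8 s from now, 5 s after the start
    }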
@@ -998,12 +1092,17 @@ ThreadFuture<Void> MultiVersionDatabase::createSnapshot(const StringRef& uid, co
 }
 
 // Get network thread busyness
+// Return the busyness for the main thread. When using external clients, take the larger of the local client
+// and the external client's busyness.
 double MultiVersionDatabase::getMainThreadBusyness() {
+	ASSERT(g_network);
+
+	double localClientBusyness = g_network->networkInfo.metrics.networkBusyness;
 	if (dbState->db) {
-		return dbState->db->getMainThreadBusyness();
+		return std::max(dbState->db->getMainThreadBusyness(), localClientBusyness);
 	}
 
-	return 0;
+	return localClientBusyness;
 }
 
 // Returns the protocol version reported by the coordinator this client is connected to
@@ -1886,8 +1985,28 @@ void MultiVersionApi::loadEnvironmentVariableNetworkOptions() {
 			std::string valueStr;
 			try {
 				if (platform::getEnvironmentVar(("FDB_NETWORK_OPTION_" + option.second.name).c_str(), valueStr)) {
+					FDBOptionInfo::ParamType curParamType = option.second.paramType;
 					for (auto value : parseOptionValues(valueStr)) {
-						Standalone<StringRef> currentValue = StringRef(value);
+						Standalone<StringRef> currentValue;
+						int64_t intParamVal;
+						if (curParamType == FDBOptionInfo::ParamType::Int) {
+							try {
+								size_t nextIdx;
+								intParamVal = std::stoll(value, &nextIdx);
+								if (nextIdx != value.length()) {
+									throw invalid_option_value();
+								}
+							} catch (std::exception e) {
+								TraceEvent(SevError, "EnvironmentVariableParseIntegerFailed")
+								    .detail("Option", option.second.name)
+								    .detail("Value", valueStr)
+								    .detail("Error", e.what());
+								throw invalid_option_value();
+							}
+							currentValue = StringRef(reinterpret_cast<uint8_t*>(&intParamVal), 8);
+						} else {
+							currentValue = StringRef(value);
+						}
 						{ // lock scope
 							MutexHolder holder(lock);
 							if (setEnvOptions[option.first].count(currentValue) == 0) {
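The nextIdx != value.length() check above is what rejects inputs like "42abc" that std::stoll alone would half-accept (it parses the leading digits and stops). The same validation in a standalone form, as an illustration only; std::runtime_error stands in for invalid_option_value():

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Parse an integer option value exactly as the hunk above does: std::stoll plus
    // a check that the entire string was consumed.
    int64_t parseIntOption(const std::string& value) {
        size_t nextIdx;
        int64_t parsed = std::stoll(value, &nextIdx); // throws std::invalid_argument / std::out_of_range
        if (nextIdx != value.length()) {
            throw std::runtime_error("invalid option value: trailing characters");
        }
        return parsed;
    }

    int main() {
        std::cout << parseIntOption("5000") << "\n"; // 5000
        try {
            parseIntOption("42abc"); // rejected by the suffix check
        } catch (const std::exception& e) {
            std::cout << e.what() << "\n";
        }
    }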