Merge branch 'master' into improve-exlude-failed-servers
Commit: c7eb611fd4
@@ -1,6 +1,6 @@
-add_subdirectory(c)
 if(NOT OPEN_FOR_IDE)
   # flow bindings currently doesn't support that
+  add_subdirectory(c)
   add_subdirectory(flow)
 endif()
 if(WITH_PYTHON_BINDING)

@@ -39,6 +39,8 @@ else()
   strip_debug_symbols(fdb_c)
 endif()
 add_dependencies(fdb_c fdb_c_generated fdb_c_options)
+add_dependencies(fdbclient fdb_c_options)
+add_dependencies(fdbclient_sampling fdb_c_options)
 target_link_libraries(fdb_c PUBLIC $<BUILD_INTERFACE:fdbclient>)
 if(APPLE)
   set(symbols ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.symbols)

@@ -130,9 +132,14 @@ if(NOT WIN32)
   target_link_libraries(fdb_c90_test PRIVATE fdb_c)
 endif()

+if(OPEN_FOR_IDE)
+  set(FDB_C_TARGET $<TARGET_OBJECTS:fdb_c>)
+else()
+  set(FDB_C_TARGET $<TARGET_FILE:fdb_c>)
+endif()
 add_custom_command(
   OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
-  COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_c> ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
+  COMMAND ${CMAKE_COMMAND} -E copy ${FDB_C_TARGET} ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
   DEPENDS fdb_c
   COMMENT "Copy libfdb_c to use as external client for test")
 add_custom_target(external_client DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so)

@@ -128,7 +128,8 @@ function(add_fdb_test)
     -n ${test_name}
     -b ${PROJECT_BINARY_DIR}
     -t ${test_type}
-    -O ${OLD_FDBSERVER_BINARY}
+    -O ${OLD_FDBSERVER_BINARY}
+    --config "@CTEST_CONFIGURATION_TYPE@"
     --crash
     --aggregate-traces ${TEST_AGGREGATE_TRACES}
     --log-format ${TEST_LOG_FORMAT}

@@ -1,6 +1,6 @@
 # FindRocksDB

-find_package(RocksDB)
+find_package(RocksDB 6.22.1)

 include(ExternalProject)

@@ -4,5 +4,20 @@ find_path(ROCKSDB_INCLUDE_DIR
   NAMES rocksdb/db.h
   PATH_SUFFIXES include)

+if(ROCKSDB_INCLUDE_DIR AND EXISTS "${ROCKSDB_INCLUDE_DIR}/rocksdb/version.h")
+  foreach(ver "MAJOR" "MINOR" "PATCH")
+    file(STRINGS "${ROCKSDB_INCLUDE_DIR}/rocksdb/version.h" ROCKSDB_VER_${ver}_LINE
+         REGEX "^#define[ \t]+ROCKSDB_${ver}[ \t]+[0-9]+$")
+    string(REGEX REPLACE "^#define[ \t]+ROCKSDB_${ver}[ \t]+([0-9]+)$"
+           "\\1" ROCKSDB_VERSION_${ver} "${ROCKSDB_VER_${ver}_LINE}")
+    unset(${ROCKSDB_VER_${ver}_LINE})
+  endforeach()
+  set(ROCKSDB_VERSION_STRING
+      "${ROCKSDB_VERSION_MAJOR}.${ROCKSDB_VERSION_MINOR}.${ROCKSDB_VERSION_PATCH}")
+
+  message(STATUS "Found RocksDB version: ${ROCKSDB_VERSION_STRING}")
+endif()
+
 find_package_handle_standard_args(RocksDB
-  DEFAULT_MSG ROCKSDB_INCLUDE_DIR)
+  REQUIRED_VARS ROCKSDB_INCLUDE_DIR
+  VERSION_VAR ROCKSDB_VERSION_STRING)

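For context on what this module parses: RocksDB publishes its version as plain integer macros in rocksdb/version.h, which is exactly what the file(STRINGS)/string(REGEX REPLACE) pair above extracts. A minimal standalone check (assuming a RocksDB install on the include path) is:

    // Prints the RocksDB version the compiler actually sees: the same
    // ROCKSDB_MAJOR/MINOR/PATCH macros the find module greps out of version.h.
    #include <rocksdb/version.h>
    #include <cstdio>

    int main() {
        std::printf("RocksDB %d.%d.%d\n", ROCKSDB_MAJOR, ROCKSDB_MINOR, ROCKSDB_PATCH);
        return 0;
    }
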
@@ -215,17 +215,17 @@ set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
 set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
 set(CPACK_COMPONENT_SERVER-VERSIONED_DEPENDS clients-versioned)
 set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_REQUIRES
-  "foundationdb-clients-${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH} = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
+  "foundationdb${PROJECT_VERSION}-clients")

 set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server")
-set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb-server-${PROJECT_VERSION}")
+set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb${PROJECT_VERSION}-server")

 set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
-set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb-clients-${PROJECT_VERSION}")
+set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb${PROJECT_VERSION}-clients")

 # MacOS needs a file extension for the LICENSE file

@@ -246,14 +246,21 @@ else()
   set(prerelease_string "-1")
 endif()

 #############
 # Filenames #
 #############
+set(unversioned_postfix "${PROJECT_VERSION}${prerelease_string}")
 # RPM filenames
-set(rpm-clients-filename "foundationdb-clients-${PROJECT_VERSION}${prerelease_string}")
-set(rpm-server-filename "foundationdb-server-${PROJECT_VERSION}${prerelease_string}")
+set(rpm-clients-filename "foundationdb-clients-${unversioned_postfix}")
+set(rpm-server-filename "foundationdb-server-${unversioned_postfix}")
+set(rpm-clients-versioned-filename "foundationdb${PROJECT_VERSION}-clients${prerelease_string}")
+set(rpm-server-versioned-filename "foundationdb${PROJECT_VERSION}-server${prerelease_string}")

 # Deb filenames
-set(deb-clients-filename "foundationdb-clients_${PROJECT_VERSION}${prerelease_string}")
-set(deb-server-filename "foundationdb-server_${PROJECT_VERSION}${prerelease_string}")
+set(deb-clients-filename "foundationdb-clients_${unversioned_postfix}")
+set(deb-server-filename "foundationdb-server_${unversioned_postfix}")
+set(deb-clients-versioned-filename "foundationdb${PROJECT_VERSION}-clients${prerelease_string}")
+set(deb-server-versioned-filename "foundationdb${PROJECT_VERSION}-server${prerelease_string}")

 ################################################################################
 # Configuration for RPM

@@ -264,18 +271,18 @@ set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0")
 set(CPACK_RPM_PACKAGE_NAME "foundationdb")
 set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients")
 set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server")
-set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
-set(CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb-clients-${PROJECT_VERSION}")
+set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-server")
+set(CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-clients")

 set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${rpm-clients-filename}.el7.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${rpm-clients-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${rpm-clients-versioned-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
 set(CPACK_RPM_SERVER-EL7_FILE_NAME "${rpm-server-filename}.el7.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${rpm-server-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${rpm-server-versioned-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")

 set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.el7-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-clients-versioned-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
 set(CPACK_RPM_SERVER-EL7_DEBUGINFO_FILE_NAME "${rpm-server-filename}.el7-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
-set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-server-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
+set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-server-versioned-filename}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")

 file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir")
 fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server)

@@ -347,13 +354,13 @@ set(CPACK_RPM_CLIENTS-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE
 if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
   set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${deb-clients-filename}_amd64.deb")
   set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${deb-server-filename}_amd64.deb")
-  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-filename}.versioned_amd64.deb")
-  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-filename}.versioned_amd64.deb")
+  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-versioned-filename}.versioned_amd64.deb")
+  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-versioned-filename}.versioned_amd64.deb")
 else()
   set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${deb-clients-filename}_${CMAKE_SYSTEM_PROCESSOR}.deb")
   set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${deb-server-filename}_${CMAKE_SYSTEM_PROCESSOR}.deb")
-  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
-  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
+  set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "${deb-clients-versioned-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
+  set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "${deb-server-versioned-filename}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
 endif()

 set(CPACK_DEB_COMPONENT_INSTALL ON)

@@ -363,8 +370,8 @@ set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)

 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_NAME "foundationdb-server")
 set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_NAME "foundationdb-clients")
-set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
-set(CPACK_DEBIAN_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb-clients-${PROJECT_VERSION}")
+set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-server")
+set(CPACK_DEBIAN_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb${PROJECT_VERSION}-clients")

 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), foundationdb-clients (= ${FDB_VERSION})")
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_RECOMMENDS "python (>= 2.6)")

@@ -1,23 +1,24 @@
 add_subdirectory(tutorial)
 if(WIN32)
   return()
 endif()

 # build a virtualenv
 set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
 set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
 set(EXE_SUFFIX "")
+if(WIN32)
+  set(venv_bin_dir ${CMAKE_CURRENT_BINARY_DIR}/venv/Scripts)
+  set(activate_script ${venv_bin_dir}/activate.bat)
+  set(EXE_SUFFIX ".exe")
+else()
+  set(venv_bin_dir ${CMAKE_CURRENT_BINARY_DIR}/venv/bin)
+  set(activate_script . ${venv_bin_dir}/activate)
+  set(EXE_SUFFIX "")
+endif()
-set(pip_command ${venv_dir}/bin/pip${EXE_SUFFIX})
-set(python_command ${venv_dir}/bin/python${EXE_SUFFIX})
+set(python_command ${venv_bin_dir}/python${EXE_SUFFIX})
+set(pip_command ${venv_bin_dir}/pip${EXE_SUFFIX})

 add_custom_command(OUTPUT ${venv_dir}/venv_setup
   COMMAND ${Python3_EXECUTABLE} -m venv venv &&
           ${CMAKE_COMMAND} -E copy ${sphinx_dir}/.pip.conf ${venv_dir}/pip.conf &&
-          . ${venv_dir}/bin/activate &&
-          ${pip_command} install --upgrade pip &&
+          ${activate_script} &&
+          ${python_command} -m pip install --upgrade pip &&
           ${pip_command} install --upgrade -r ${sphinx_dir}/requirements.txt &&
           ${pip_command} install sphinx-autobuild && # somehow this is missing in requirements.txt
           ${CMAKE_COMMAND} -E touch ${venv_dir}/venv_setup

@@ -36,9 +37,9 @@ function(add_documentation_target)
     message(ERROR "GENERATOR is a required argument to add_documentation_target")
   endif()
   set(target ${ADT_GENERATOR})
-  set(SPHINX_COMMAND "${venv_dir}/bin/sphinx-build")
+  set(SPHINX_COMMAND "${venv_bin_dir}/sphinx-build${EXE_SUFFIX}")
   if(ADT_SPHINX_COMMAND)
-    set(SPHINX_COMMAND "${venv_dir}/bin/${ADT_SPHINX_COMMAND}")
+    set(SPHINX_COMMAND "${venv_bin_dir}/${ADT_SPHINX_COMMAND}")
   endif()
   set(doctree "doctree")
   if (ADT_DOCTREE)

@@ -24,7 +24,7 @@ Let's consider an **AP** database. In such a database, reads and writes would al

 However, the downside is stark. Imagine a simple distributed database consisting of two nodes and a network partition making them unable to communicate. To be Available, each of the two nodes must continue to accept writes from clients.

-.. figure:: /images/AP_Partition.png
+.. figure:: images/AP_Partition.png

    Data divergence in an AP system during partition

@@ -62,7 +62,7 @@ Imagine that a rack-top switch fails, and A is partitioned from the network. A w

 However, for all other clients, the database servers can reach a majority of coordination servers, B and C. The replication configuration has ensured there is a full copy of the data available even without A. For these clients, the database will remain available for reads and writes and the web servers will continue to serve traffic.

-.. figure:: /images/FDB_Partition.png
+.. figure:: images/FDB_Partition.png

    Maintenance of availability during partition

@@ -176,7 +176,7 @@ The *LogPushData* class is used to hold serialized mutations on a per transaction

 *LogPushData.writeTypedMessage* is the function that serializes each mutation and writes it to the correct binary stream to be sent to the corresponding transaction log. Each serialized mutation contains additional metadata about the message, with the format:

-.. image:: /images/serialized_mutation_metadata_format.png
+.. image:: images/serialized_mutation_metadata_format.png

 * Message size: size of the message, in bytes, excluding the four bytes used for the message size

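The bullet above describes length-prefixed framing: the size field counts the payload only, not its own four bytes. As a hedged illustration of that rule (frameMessage is an invented helper for this sketch, not FDB's actual serializer):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <vector>

    // Frame a payload as described above: a 4-byte message size that excludes
    // the four bytes used for the size field itself, followed by the payload.
    std::vector<uint8_t> frameMessage(const std::string& payload) {
        uint32_t size = static_cast<uint32_t>(payload.size());
        std::vector<uint8_t> out(sizeof(size) + payload.size());
        std::memcpy(out.data(), &size, sizeof(size));
        std::memcpy(out.data() + sizeof(size), payload.data(), payload.size());
        return out;
    }

    int main() {
        // A 5-byte payload yields a 9-byte frame whose size field reads 5.
        std::printf("%zu\n", frameMessage("hello").size());
        return 0;
    }
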
@@ -9,7 +9,7 @@ Scaling

 FoundationDB scales linearly with the number of cores in a cluster over a wide range of sizes.

-.. image:: /images/scaling.png
+.. image:: images/scaling.png

 Here, a cluster of commodity hardware scales to **8.2 million** operations/sec doing a 90% read and 10% write workload with 16 byte keys and values between 8 and 100 bytes.

@@ -24,7 +24,7 @@ Latency

 FoundationDB has low latencies over a broad range of workloads that only increase modestly as the cluster approaches saturation.

-.. image:: /images/latency.png
+.. image:: images/latency.png

 When run at less than **75% load**, FoundationDB typically has the following latencies:

@@ -53,7 +53,7 @@ Throughput (per core)

 FoundationDB provides good throughput for the full range of read and write workloads, with two fully durable storage engine options.

-.. image:: /images/throughput.png
+.. image:: images/throughput.png

 FoundationDB offers two :ref:`storage engines <configuration-storage-engine>`, optimized for distinct use cases, both of which write to disk before reporting transactions committed. For each storage engine, the graph shows throughput of a single FoundationDB process running on a **single core** with saturating read/write workloads ranging from 100% reads to 100% writes, all with 16 byte keys and values between 8 and 100 bytes. Throughput for the unmixed workloads is about:

@@ -79,7 +79,7 @@ Concurrency

 FoundationDB is designed to achieve great performance under high concurrency from a large number of clients.

-.. image:: /images/concurrency.png
+.. image:: images/concurrency.png

 Its asynchronous design allows it to handle very high concurrency, and for a typical workload with 90% reads and 10% writes, maximum throughput is reached at about 200 concurrent operations. This number of operations was achieved with **20** concurrent transactions per FoundationDB process each running 10 operations with 16 byte keys and values between 8 and 100 bytes.

@@ -16,7 +16,7 @@ The processing order of multiple transactions is important because it affects the
 The content is based on FDB 6.2 and is true for FDB 6.3. A new timestamp proxy role is introduced in post FDB 6.3,
 which affects the read path. We will discuss the timestamp proxy role in the future version of this document.

-.. image:: /images/FDB_read_path.png
+.. image:: images/FDB_read_path.png

 Components
 =================

@@ -198,7 +198,7 @@ Write path of a transaction
 Suppose a client has a write-only transaction. Fig. 2 below shows the write path in a non-HA cluster.
 We will discuss how a transaction with both read and write works in the next section.

-.. image:: /images/FDB_write_path.png
+.. image:: images/FDB_write_path.png

 To simplify the explanation, the steps below do not include transaction batching on proxy,
 which is a typical database technique to increase transaction throughput.

@@ -461,7 +461,7 @@ The ordering is enforced in the timestamp generator, the concurrency control component
 We use the following example and draw its swimlane diagram to illustrate how two write transactions are ordered in FDB.
 The diagram with notes can be viewed `here <https://lucid.app/lucidchart/6336dbe3-cff4-4c46-995a-4ca3d9260696/view?page=0_0#?folder_id=home&browser=icon>`_.

-.. image:: /images/FDB_multiple_txn_swimlane_diagram.png
+.. image:: images/FDB_multiple_txn_swimlane_diagram.png

 Reference
 ============

@@ -2,6 +2,11 @@
 Release Notes
 #############

+6.3.20
+======
+* Several minor problems with the versioned packages have been fixed. `(PR 5607) <https://github.com/apple/foundationdb/pull/5607>`_
+* A client might not honor transaction timeouts when using the multi-version client if it cannot connect to the cluster. `(Issue #5595) <https://github.com/apple/foundationdb/issues/5595>`_
+
 6.3.19
 ======
 * Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5330) <https://github.com/apple/foundationdb/pull/5330>`_

@@ -64,6 +64,7 @@ Fixes
 * If a restore is done using a prefix to remove and specific key ranges to restore, the key range boundaries must begin with the prefix to remove. `(PR #4684) <https://github.com/apple/foundationdb/pull/4684>`_
 * The multi-version client API would not propagate errors that occurred when creating databases on external clients. This could result in an invalid memory access. `(PR #5220) <https://github.com/apple/foundationdb/pull/5220>`_
 * Fixed a race between the multi-version client connecting to a cluster and destroying the database that could cause an assertion failure. `(PR #5220) <https://github.com/apple/foundationdb/pull/5220>`_
+* A client might not honor transaction timeouts when using the multi-version client if it cannot connect to the cluster. `(Issue #5595) <https://github.com/apple/foundationdb/issues/5595>`_

 Status
 ------

@@ -33,6 +33,12 @@

 NetworkAddress serverAddress;

+enum TutorialWellKnownEndpoints {
+    WLTOKEN_SIMPLE_KV_SERVER = WLTOKEN_FIRST_AVAILABLE,
+    WLTOKEN_ECHO_SERVER,
+    WLTOKEN_COUNT_IN_TUTORIAL
+};
+
 // this is a simple actor that will report how long
 // it is already running once a second.
 ACTOR Future<Void> simpleTimer() {

@@ -153,7 +159,7 @@ struct StreamReply : ReplyPromiseStreamReply {

     template <class Ar>
     void serialize(Ar& ar) {
-        serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, index);
+        serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, ReplyPromiseStreamReply::sequence, index);
     }
 };

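The fix works because flow serialization is positional: the field list passed to serializer() is the wire format, so sender and receiver must name the same fields in the same order. A self-contained analogy (plain byte packing, not flow's actual serializer) of what goes wrong when one side omits a field:

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Positional encoding: fields are written back to back with no tags,
    // exactly in the order they are listed, just like serializer() above.
    template <class... Ts>
    std::vector<uint8_t> writeFields(Ts... fields) {
        std::vector<uint8_t> buf;
        auto put = [&buf](auto v) {
            uint8_t tmp[sizeof(v)];
            std::memcpy(tmp, &v, sizeof(v));
            buf.insert(buf.end(), tmp, tmp + sizeof(v));
        };
        (put(fields), ...);
        return buf;
    }

    int main() {
        // Sender writes (token, index); a receiver expecting
        // (token, sequence, index) misreads every field after the omission.
        auto wire = writeFields(uint64_t{ 42 } /*token*/, int32_t{ 7 } /*index*/);
        std::cout << "wire bytes: " << wire.size() << " (receiver expects "
                  << sizeof(uint64_t) + sizeof(uint64_t) + sizeof(int32_t) << ")\n";
        return 0;
    }
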
@@ -171,7 +177,7 @@ uint64_t tokenCounter = 1;

 ACTOR Future<Void> echoServer() {
     state EchoServerInterface echoServer;
-    echoServer.getInterface.makeWellKnownEndpoint(UID(-1, ++tokenCounter), TaskPriority::DefaultEndpoint);
+    echoServer.getInterface.makeWellKnownEndpoint(WLTOKEN_ECHO_SERVER, TaskPriority::DefaultEndpoint);
     loop {
         try {
             choose {

@@ -204,7 +210,8 @@ ACTOR Future<Void> echoServer() {

 ACTOR Future<Void> echoClient() {
     state EchoServerInterface server;
-    server.getInterface = RequestStream<GetInterfaceRequest>(Endpoint({ serverAddress }, UID(-1, ++tokenCounter)));
+    server.getInterface =
+        RequestStream<GetInterfaceRequest>(Endpoint::wellKnown({ serverAddress }, WLTOKEN_ECHO_SERVER));
     EchoServerInterface s = wait(server.getInterface.getReply(GetInterfaceRequest()));
     server = s;
     EchoRequest echoRequest;

@@ -291,7 +298,7 @@ struct ClearRequest {
 ACTOR Future<Void> kvStoreServer() {
     state SimpleKeyValueStoreInteface inf;
     state std::map<std::string, std::string> store;
-    inf.connect.makeWellKnownEndpoint(UID(-1, ++tokenCounter), TaskPriority::DefaultEndpoint);
+    inf.connect.makeWellKnownEndpoint(WLTOKEN_SIMPLE_KV_SERVER, TaskPriority::DefaultEndpoint);
     loop {
         choose {
             when(GetKVInterface req = waitNext(inf.connect.getFuture())) {

@@ -328,7 +335,7 @@ ACTOR Future<Void> kvStoreServer() {
 ACTOR Future<SimpleKeyValueStoreInteface> connect() {
     std::cout << format("%llu: Connect...\n", uint64_t(g_network->now()));
     SimpleKeyValueStoreInteface c;
-    c.connect = RequestStream<GetKVInterface>(Endpoint({ serverAddress }, UID(-1, ++tokenCounter)));
+    c.connect = RequestStream<GetKVInterface>(Endpoint::wellKnown({ serverAddress }, WLTOKEN_SIMPLE_KV_SERVER));
     SimpleKeyValueStoreInteface result = wait(c.connect.getReply(GetKVInterface()));
     std::cout << format("%llu: done..\n", uint64_t(g_network->now()));
     return result;

@@ -562,7 +569,7 @@ int main(int argc, char* argv[]) {
     }
     platformInit();
     g_network = newNet2(TLSConfig(), false, true);
-    FlowTransport::createInstance(!isServer, 0);
+    FlowTransport::createInstance(!isServer, 0, WLTOKEN_COUNT_IN_TUTORIAL);
     NetworkAddress publicAddress = NetworkAddress::parse("0.0.0.0:0");
     if (isServer) {
         publicAddress = NetworkAddress::parse("0.0.0.0:" + port);

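Taken together, the tutorial changes swap handshake-derived random UID tokens for a fixed, compile-time table of well-known tokens shared by client and server, with the transport told up front how many such tokens exist. A self-contained analogy of the idea in ordinary C++ (illustrative only; flow's real transport and RequestStream types are not shown):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Mirrors the enum added in the diff: both sides agree on these values at
    // compile time, so no token exchange is needed before the first request.
    enum WellKnownToken : uint64_t {
        WLTOKEN_SIMPLE_KV_SERVER = 0,
        WLTOKEN_ECHO_SERVER,
        WLTOKEN_COUNT // analogous to WLTOKEN_COUNT_IN_TUTORIAL, reserved up front
    };

    int main() {
        // "Server" side: bind a handler to the agreed token
        // (the analogue of makeWellKnownEndpoint).
        std::unordered_map<uint64_t, std::function<std::string(const std::string&)>> endpoints;
        endpoints[WLTOKEN_ECHO_SERVER] = [](const std::string& msg) { return msg; };

        // "Client" side: address the service by host + token alone
        // (the analogue of Endpoint::wellKnown).
        std::cout << endpoints.at(WLTOKEN_ECHO_SERVER)("hello") << "\n";
        return 0;
    }
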
@@ -52,8 +52,6 @@
 #include <string>
 #include <iostream>
 #include <ctime>
-using std::cout;
-using std::endl;

 #ifdef _WIN32
 #define WIN32_LEAN_AND_MEAN

@@ -4235,14 +4233,14 @@ int main(int argc, char* argv[]) {

 #ifdef ALLOC_INSTRUMENTATION
     {
-        cout << "Page Counts: " << FastAllocator<16>::pageCount << " " << FastAllocator<32>::pageCount << " "
-             << FastAllocator<64>::pageCount << " " << FastAllocator<128>::pageCount << " "
-             << FastAllocator<256>::pageCount << " " << FastAllocator<512>::pageCount << " "
-             << FastAllocator<1024>::pageCount << " " << FastAllocator<2048>::pageCount << " "
-             << FastAllocator<4096>::pageCount << " " << FastAllocator<8192>::pageCount << " "
-             << FastAllocator<16384>::pageCount << endl;
+        std::cout << "Page Counts: " << FastAllocator<16>::pageCount << " " << FastAllocator<32>::pageCount << " "
+                  << FastAllocator<64>::pageCount << " " << FastAllocator<128>::pageCount << " "
+                  << FastAllocator<256>::pageCount << " " << FastAllocator<512>::pageCount << " "
+                  << FastAllocator<1024>::pageCount << " " << FastAllocator<2048>::pageCount << " "
+                  << FastAllocator<4096>::pageCount << " " << FastAllocator<8192>::pageCount << " "
+                  << FastAllocator<16384>::pageCount << std::endl;

-        vector<std::pair<std::string, const char*>> typeNames;
+        std::vector<std::pair<std::string, const char*>> typeNames;
         for (auto i = allocInstr.begin(); i != allocInstr.end(); ++i) {
             std::string s;

@@ -3,11 +3,13 @@ set(FDBCLI_SRCS
   fdbcli.actor.h
   AdvanceVersionCommand.actor.cpp
   CacheRangeCommand.actor.cpp
+  ConfigureCommand.actor.cpp
   ConsistencyCheckCommand.actor.cpp
   CoordinatorsCommand.actor.cpp
   DataDistributionCommand.actor.cpp
   ExcludeCommand.actor.cpp
   ExpensiveDataCheckCommand.actor.cpp
+  FileConfigureCommand.actor.cpp
   FlowLineNoise.actor.cpp
   FlowLineNoise.h
   ForceRecoveryWithDataLossCommand.actor.cpp

@@ -0,0 +1,298 @@
+/*
+ * ConfigureCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbcli/FlowLineNoise.h"
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/ManagementAPI.actor.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace fdb_cli {
+
+ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
+                                         Database localDb,
+                                         std::vector<StringRef> tokens,
+                                         LineNoise* linenoise,
+                                         Future<Void> warn) {
+    state ConfigurationResult result;
+    state StatusObject s;
+    state int startToken = 1;
+    state bool force = false;
+    if (tokens.size() < 2)
+        result = ConfigurationResult::NO_OPTIONS_PROVIDED;
+    else {
+        if (tokens[startToken] == LiteralStringRef("FORCE")) {
+            force = true;
+            startToken = 2;
+        }
+
+        state Optional<ConfigureAutoResult> conf;
+        if (tokens[startToken] == LiteralStringRef("auto")) {
+            // get cluster status
+            state Reference<ITransaction> tr = db->createTransaction();
+            if (!tr->isValid()) {
+                StatusObject _s = wait(StatusClient::statusFetcher(localDb));
+                s = _s;
+            } else {
+                state ThreadFuture<Optional<Value>> statusValueF = tr->get(LiteralStringRef("\xff\xff/status/json"));
+                Optional<Value> statusValue = wait(safeThreadFutureToFuture(statusValueF));
+                if (!statusValue.present()) {
+                    fprintf(stderr, "ERROR: Failed to get status json from the cluster\n");
+                    return false;
+                }
+                json_spirit::mValue mv;
+                json_spirit::read_string(statusValue.get().toString(), mv);
+                s = StatusObject(mv.get_obj());
+            }
+
+            if (warn.isValid())
+                warn.cancel();
+
+            conf = parseConfig(s);
+
+            if (!conf.get().isValid()) {
+                printf("Unable to provide advice for the current configuration.\n");
+                return false;
+            }
+
+            bool noChanges = conf.get().old_replication == conf.get().auto_replication &&
+                             conf.get().old_logs == conf.get().auto_logs &&
+                             conf.get().old_commit_proxies == conf.get().auto_commit_proxies &&
+                             conf.get().old_grv_proxies == conf.get().auto_grv_proxies &&
+                             conf.get().old_resolvers == conf.get().auto_resolvers &&
+                             conf.get().old_processes_with_transaction == conf.get().auto_processes_with_transaction &&
+                             conf.get().old_machines_with_transaction == conf.get().auto_machines_with_transaction;
+
+            bool noDesiredChanges = noChanges && conf.get().old_logs == conf.get().desired_logs &&
+                                    conf.get().old_commit_proxies == conf.get().desired_commit_proxies &&
+                                    conf.get().old_grv_proxies == conf.get().desired_grv_proxies &&
+                                    conf.get().old_resolvers == conf.get().desired_resolvers;
+
+            std::string outputString;
+
+            outputString += "\nYour cluster has:\n\n";
+            outputString += format(" processes %d\n", conf.get().processes);
+            outputString += format(" machines %d\n", conf.get().machines);
+
+            if (noDesiredChanges)
+                outputString += "\nConfigure recommends keeping your current configuration:\n\n";
+            else if (noChanges)
+                outputString +=
+                    "\nConfigure cannot modify the configuration because some parameters have been set manually:\n\n";
+            else
+                outputString += "\nConfigure recommends the following changes:\n\n";
+            outputString += " ------------------------------------------------------------------- \n";
+            outputString += "| parameter | old | new |\n";
+            outputString += " ------------------------------------------------------------------- \n";
+            outputString += format("| replication | %16s | %16s |\n",
+                                   conf.get().old_replication.c_str(),
+                                   conf.get().auto_replication.c_str());
+            outputString +=
+                format("| logs | %16d | %16d |", conf.get().old_logs, conf.get().auto_logs);
+            outputString += conf.get().auto_logs != conf.get().desired_logs
+                                ? format(" (manually set; would be %d)\n", conf.get().desired_logs)
+                                : "\n";
+            outputString += format("| commit_proxies | %16d | %16d |",
+                                   conf.get().old_commit_proxies,
+                                   conf.get().auto_commit_proxies);
+            outputString += conf.get().auto_commit_proxies != conf.get().desired_commit_proxies
+                                ? format(" (manually set; would be %d)\n", conf.get().desired_commit_proxies)
+                                : "\n";
+            outputString += format("| grv_proxies | %16d | %16d |",
+                                   conf.get().old_grv_proxies,
+                                   conf.get().auto_grv_proxies);
+            outputString += conf.get().auto_grv_proxies != conf.get().desired_grv_proxies
+                                ? format(" (manually set; would be %d)\n", conf.get().desired_grv_proxies)
+                                : "\n";
+            outputString += format(
+                "| resolvers | %16d | %16d |", conf.get().old_resolvers, conf.get().auto_resolvers);
+            outputString += conf.get().auto_resolvers != conf.get().desired_resolvers
+                                ? format(" (manually set; would be %d)\n", conf.get().desired_resolvers)
+                                : "\n";
+            outputString += format("| transaction-class processes | %16d | %16d |\n",
+                                   conf.get().old_processes_with_transaction,
+                                   conf.get().auto_processes_with_transaction);
+            outputString += format("| transaction-class machines | %16d | %16d |\n",
+                                   conf.get().old_machines_with_transaction,
+                                   conf.get().auto_machines_with_transaction);
+            outputString += " ------------------------------------------------------------------- \n\n";
+
+            std::printf("%s", outputString.c_str());
+
+            if (noChanges)
+                return true;
+
+            // TODO: disable completion
+            Optional<std::string> line = wait(linenoise->read("Would you like to make these changes? [y/n]> "));
+
+            if (!line.present() || (line.get() != "y" && line.get() != "Y")) {
+                return true;
+            }
+        }
+
+        ConfigurationResult r = wait(ManagementAPI::changeConfig(
+            db, std::vector<StringRef>(tokens.begin() + startToken, tokens.end()), conf, force));
+        result = r;
+    }
+
+    // Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
+    // there are various results specific to changeConfig() that we need to report:
+    bool ret = true;
+    switch (result) {
+    case ConfigurationResult::NO_OPTIONS_PROVIDED:
+    case ConfigurationResult::CONFLICTING_OPTIONS:
+    case ConfigurationResult::UNKNOWN_OPTION:
+    case ConfigurationResult::INCOMPLETE_CONFIGURATION:
+        printUsage(LiteralStringRef("configure"));
+        ret = false;
+        break;
+    case ConfigurationResult::INVALID_CONFIGURATION:
+        fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
+        ret = false;
+        break;
+    case ConfigurationResult::DATABASE_ALREADY_CREATED:
+        fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
+        ret = false;
+        break;
+    case ConfigurationResult::DATABASE_CREATED:
+        printf("Database created\n");
+        break;
+    case ConfigurationResult::DATABASE_UNAVAILABLE:
+        fprintf(stderr, "ERROR: The database is unavailable\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
+        fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
+        fprintf(stderr,
+                "ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated "
+                "before changing the configuration\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
+        fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::REGIONS_CHANGED:
+        fprintf(stderr,
+                "ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::NOT_ENOUGH_WORKERS:
+        fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::REGION_REPLICATION_MISMATCH:
+        fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::DCID_MISSING:
+        fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
+        fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::SUCCESS:
+        printf("Configuration changed\n");
+        break;
+    case ConfigurationResult::LOCKED_NOT_NEW:
+        fprintf(stderr, "ERROR: `only new databases can be configured as locked`\n");
+        ret = false;
+        break;
+    case ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL:
+        printf("Configuration changed, with warnings\n");
+        fprintf(stderr,
+                "WARN: To make progress toward the desired storage type with storage_migration_type=gradual, the "
+                "Perpetual Wiggle must be enabled.\n");
+        fprintf(stderr,
+                "Type `configure perpetual_storage_wiggle=1' to enable the perpetual wiggle, or `configure "
+                "storage_migration_type=gradual' to set the gradual migration type.\n");
+        ret = false;
+        break;
+    case ConfigurationResult::SUCCESS_WARN_CHANGE_STORAGE_NOMIGRATE:
+        printf("Configuration changed, with warnings\n");
+        fprintf(stderr,
+                "WARN: Storage engine type changed, but nothing will be migrated because "
+                "storage_migration_mode=disabled.\n");
+        fprintf(stderr,
+                "Type `configure perpetual_storage_wiggle=1 storage_migration_type=gradual' to enable gradual "
+                "migration with the perpetual wiggle, or `configure "
+                "storage_migration_type=aggressive' for aggressive migration.\n");
+        ret = false;
+        break;
+    default:
+        ASSERT(false);
+        ret = false;
+    };
+    return ret;
+}
+
+CommandFactory configureFactory(
+    "configure",
+    CommandHelp(
+        "configure [new|tss]"
+        "<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|"
+        "commit_proxies=<COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*|"
+        "count=<TSS_COUNT>|perpetual_storage_wiggle=<WIGGLE_SPEED>|storage_migration_type={disabled|gradual|"
+        "aggressive}",
+        "change the database configuration",
+        "The `new' option, if present, initializes a new database with the given configuration rather than changing "
+        "the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "
+        "specified.\n\ntss: when enabled, configures the testing storage server for the cluster instead."
+        "When used with new to set up tss for the first time, it requires both a count and a storage engine."
+        "To disable the testing storage server, run \"configure tss count=0\"\n\n"
+        "Redundancy mode:\n  single - one copy of the data.  Not fault tolerant.\n  double - two copies "
+        "of data (survive one failure).\n  triple - three copies of data (survive two failures).\n  three_data_hall - "
+        "See the Admin Guide.\n  three_datacenter - See the Admin Guide.\n\nStorage engine:\n  ssd - B-Tree storage "
+        "engine optimized for solid state disks.\n  memory - Durable in-memory storage engine for small "
+        "datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. The proxy role is being "
+        "deprecated and split into GRV proxy and Commit proxy, now prefer configure 'grv_proxies' and 'commit_proxies' "
+        "separately. Generally we should follow that 'commit_proxies' is three times of 'grv_proxies' and "
+        "'grv_proxies' "
+        "should be not more than 4. If 'proxies' is specified, it will be converted to 'grv_proxies' and "
+        "'commit_proxies'. "
+        "Must be at least 2 (1 GRV proxy, 1 Commit proxy), or set to -1 which restores the number of proxies to the "
+        "default value.\n\ncommit_proxies=<COMMIT_PROXIES>: Sets the desired number of commit proxies in the cluster. "
+        "Must be at least 1, or set to -1 which restores the number of commit proxies to the default "
+        "value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the desired number of GRV proxies in the cluster. Must be at least "
+        "1, or set to -1 which restores the number of GRV proxies to the default value.\n\nlogs=<LOGS>: Sets the "
+        "desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of "
+        "logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. "
+        "Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\n"
+        "perpetual_storage_wiggle=<WIGGLE_SPEED>: Set the value speed (a.k.a., the number of processes that the Data "
+        "Distributor should wiggle at a time). Currently, only 0 and 1 are supported. The value 0 means to disable the "
+        "perpetual storage wiggle.\n\n"
+        "See the FoundationDB Administration Guide for more information."));
+
+} // namespace fdb_cli

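The recommendation table in configureCommandActor relies only on printf-style width specifiers; %16s and %16d right-align each cell to a fixed 16-character column. A tiny standalone illustration with made-up values:

    #include <cstdio>

    int main() {
        // %16s / %16d pad each cell to 16 characters, which is what keeps
        // the old/new columns of the configure output lined up.
        std::printf("| %s | %16s | %16s |\n", "replication", "double", "triple");
        std::printf("| %s | %16d | %16d |\n", "logs", 3, 8);
        return 0;
    }
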
@@ -83,7 +83,7 @@ ACTOR Future<bool> excludeServersAndLocalities(Reference<IDatabase> db,
     }
 }

-ACTOR Future<vector<std::string>> getExcludedServers(Reference<IDatabase> db) {
+ACTOR Future<std::vector<std::string>> getExcludedServers(Reference<IDatabase> db) {
     state Reference<ITransaction> tr = db->createTransaction();
     loop {
         try {

@@ -96,7 +96,7 @@ ACTOR Future<vector<std::string>> getExcludedServers(Reference<IDatabase> db) {
             state RangeResult r2 = wait(safeThreadFutureToFuture(resultFuture2));
             ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);

-            vector<std::string> exclusions;
+            std::vector<std::string> exclusions;
             for (const auto& i : r) {
                 auto addr = i.key.removePrefix(fdb_cli::excludedServersSpecialKeyRange.begin).toString();
                 exclusions.push_back(addr);

@@ -113,7 +113,7 @@ ACTOR Future<vector<std::string>> getExcludedServers(Reference<IDatabase> db) {
 }

 // Get the list of excluded localities by reading the keys.
-ACTOR Future<vector<std::string>> getExcludedLocalities(Reference<IDatabase> db) {
+ACTOR Future<std::vector<std::string>> getExcludedLocalities(Reference<IDatabase> db) {
     state Reference<ITransaction> tr = db->createTransaction();
     loop {
         try {

@@ -126,7 +126,7 @@ ACTOR Future<vector<std::string>> getExcludedLocalities(Reference<IDatabase> db)
             state RangeResult r2 = wait(safeThreadFutureToFuture(resultFuture2));
             ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);

-            vector<std::string> excludedLocalities;
+            std::vector<std::string> excludedLocalities;
             for (const auto& i : r) {
                 auto locality = i.key.removePrefix(fdb_cli::excludedLocalitySpecialKeyRange.begin).toString();
                 excludedLocalities.push_back(locality);

@@ -143,7 +143,7 @@ ACTOR Future<vector<std::string>> getExcludedLocalities(Reference<IDatabase> db)
 }

 ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Reference<IDatabase> db,
-                                                                vector<AddressExclusion> excl,
+                                                                std::vector<AddressExclusion> excl,
                                                                 bool waitForAllExcluded) {
     state std::set<AddressExclusion> exclusions(excl.begin(), excl.end());
     state std::set<NetworkAddress> inProgressExclusion;

@@ -171,7 +171,7 @@ ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Reference<IDatab
     return inProgressExclusion;
 }

-ACTOR Future<Void> checkForCoordinators(Reference<IDatabase> db, vector<AddressExclusion> exclusionVector) {
+ACTOR Future<Void> checkForCoordinators(Reference<IDatabase> db, std::vector<AddressExclusion> exclusionVector) {

     state bool foundCoordinator = false;
     state std::vector<NetworkAddress> coordinatorList;

@@ -224,8 +224,8 @@ const KeyRangeRef exclusionInProgressSpecialKeyRange(LiteralStringRef("\xff\xff/

 ACTOR Future<bool> excludeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens, Future<Void> warn) {
     if (tokens.size() <= 1) {
-        state vector<std::string> excludedAddresses = wait(getExcludedServers(db));
-        state vector<std::string> excludedLocalities = wait(getExcludedLocalities(db));
+        state std::vector<std::string> excludedAddresses = wait(getExcludedServers(db));
+        state std::vector<std::string> excludedLocalities = wait(getExcludedLocalities(db));

         if (!excludedAddresses.size() && !excludedLocalities.size()) {
             printf("There are currently no servers or localities excluded from the database.\n"

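These ExcludeCommand hunks are one mechanical change repeated: with a global using std::vector; removed, every unqualified vector must be written as std::vector. In miniature:

    #include <vector>

    std::vector<int> alwaysCompiles;   // fully qualified, no using-declaration needed
    // vector<int> failsWithoutUsing;  // ill-formed once `using std::vector;` is gone
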
@@ -0,0 +1,181 @@
+/*
+ * FileConfigureCommand.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbcli/FlowLineNoise.h"
+#include "fdbcli/fdbcli.actor.h"
+
+#include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/IClientApi.h"
+#include "fdbclient/ManagementAPI.actor.h"
+#include "fdbclient/Schemas.h"
+
+#include "flow/Arena.h"
+#include "flow/FastRef.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace fdb_cli {
+
+ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
+                                             std::string filePath,
+                                             bool isNewDatabase,
+                                             bool force) {
+    std::string contents(readFileBytes(filePath, 100000));
+    json_spirit::mValue config;
+    if (!json_spirit::read_string(contents, config)) {
+        fprintf(stderr, "ERROR: Invalid JSON\n");
+        return false;
+    }
+    if (config.type() != json_spirit::obj_type) {
+        fprintf(stderr, "ERROR: Configuration file must contain a JSON object\n");
+        return false;
+    }
+    StatusObject configJSON = config.get_obj();
+
+    json_spirit::mValue schema;
+    if (!json_spirit::read_string(JSONSchemas::clusterConfigurationSchema.toString(), schema)) {
+        ASSERT(false);
+    }
+
+    std::string errorStr;
+    if (!schemaMatch(schema.get_obj(), configJSON, errorStr)) {
+        printf("%s", errorStr.c_str());
+        return false;
+    }
+
+    std::string configString;
+    if (isNewDatabase) {
+        configString = "new";
+    }
+
+    for (const auto& [name, value] : configJSON) {
+        if (!configString.empty()) {
+            configString += " ";
+        }
+        if (value.type() == json_spirit::int_type) {
+            configString += name + ":=" + format("%d", value.get_int());
+        } else if (value.type() == json_spirit::str_type) {
+            configString += value.get_str();
+        } else if (value.type() == json_spirit::array_type) {
+            configString +=
+                name + "=" +
+                json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
+        } else {
+            printUsage(LiteralStringRef("fileconfigure"));
+            return false;
+        }
+    }
+    ConfigurationResult result = wait(ManagementAPI::changeConfig(db, configString, force));
+    // Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
+    // there are various results specific to changeConfig() that we need to report:
+    bool ret = true;
+    switch (result) {
+    case ConfigurationResult::NO_OPTIONS_PROVIDED:
+        fprintf(stderr, "ERROR: No options provided\n");
+        ret = false;
+        break;
+    case ConfigurationResult::CONFLICTING_OPTIONS:
+        fprintf(stderr, "ERROR: Conflicting options\n");
+        ret = false;
+        break;
+    case ConfigurationResult::UNKNOWN_OPTION:
+        fprintf(stderr, "ERROR: Unknown option\n"); // This should not be possible because of schema match
+        ret = false;
+        break;
+    case ConfigurationResult::INCOMPLETE_CONFIGURATION:
+        fprintf(stderr,
+                "ERROR: Must specify both a replication level and a storage engine when creating a new database\n");
+        ret = false;
+        break;
+    case ConfigurationResult::INVALID_CONFIGURATION:
+        fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
+        ret = false;
+        break;
+    case ConfigurationResult::DATABASE_ALREADY_CREATED:
+        fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
+        ret = false;
+        break;
+    case ConfigurationResult::DATABASE_CREATED:
+        printf("Database created\n");
+        break;
+    case ConfigurationResult::DATABASE_UNAVAILABLE:
+        fprintf(stderr, "ERROR: The database is unavailable\n");
+        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
+        fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
+        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
+        fprintf(stderr,
+                "ERROR: When usable_regions > 1, All regions with priority >= 0 must be fully replicated "
+                "before changing the configuration\n");
+        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
+        fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
+        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::REGIONS_CHANGED:
+        fprintf(stderr,
+                "ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
+        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::NOT_ENOUGH_WORKERS:
+        fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
+        printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::REGION_REPLICATION_MISMATCH:
+        fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
+        printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::DCID_MISSING:
+        fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
+        printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
+        ret = false;
+        break;
+    case ConfigurationResult::SUCCESS:
+        printf("Configuration changed\n");
+        break;
+    default:
+        ASSERT(false);
+        ret = false;
+    };
+    return ret;
+}
+
+CommandFactory fileconfigureFactory(
+    "fileconfigure",
+    CommandHelp(
+        "fileconfigure [new] <FILENAME>",
+        "change the database configuration from a file",
+        "The `new' option, if present, initializes a new database with the given configuration rather than changing "
+        "the configuration of an existing one. Load a JSON document from the provided file, and change the database "
+        "configuration to match the contents of the JSON document. The format should be the same as the value of the "
+        "\"configuration\" entry in status JSON without \"excluded_servers\" or \"coordinators_count\"."));
+
+} // namespace fdb_cli

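The loop in fileConfigureCommandActor defines a small mapping from JSON value types to configuration tokens: integers become name:=N, strings pass through as bare values, and arrays become name=<json>. A self-contained re-implementation of just the int/string cases (illustrative, using std::variant in place of json_spirit):

    #include <iostream>
    #include <map>
    #include <string>
    #include <variant>

    // Minimal sketch of the JSON-to-configuration-string mapping above:
    // integers render as name:=N, strings as their bare value.
    int main() {
        std::map<std::string, std::variant<int, std::string>> config{
            { "logs", 8 }, { "redundancy_mode", std::string("triple") }
        };
        std::string out = "new"; // isNewDatabase == true
        for (const auto& [name, value] : config) {
            out += " ";
            if (std::holds_alternative<int>(value))
                out += name + ":=" + std::to_string(std::get<int>(value));
            else
                out += std::get<std::string>(value);
        }
        std::cout << out << "\n"; // prints: new logs:=8 triple
        return 0;
    }
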
@@ -34,7 +34,7 @@ namespace {
 // Remove the given localities from the exclusion list.
 // include localities by clearing the keys.
 ACTOR Future<Void> includeLocalities(Reference<IDatabase> db,
-                                     vector<std::string> localities,
+                                     std::vector<std::string> localities,
                                      bool failed,
                                      bool includeAll) {
     state std::string versionKey = deterministicRandom()->randomUniqueID().toString();

@@ -65,7 +65,7 @@ ACTOR Future<Void> includeLocalities(Reference<IDatabase> db,
     }
 }

-ACTOR Future<Void> includeServers(Reference<IDatabase> db, vector<AddressExclusion> servers, bool failed) {
+ACTOR Future<Void> includeServers(Reference<IDatabase> db, std::vector<AddressExclusion> servers, bool failed) {
     state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
     state Reference<ITransaction> tr = db->createTransaction();
     loop {

@@ -31,7 +31,7 @@

 namespace fdb_cli {

-ACTOR Future<Void> triggerddteaminfologCommandActor(Reference<IDatabase> db) {
+ACTOR Future<bool> triggerddteaminfologCommandActor(Reference<IDatabase> db) {
     state Reference<ITransaction> tr = db->createTransaction();
     loop {
         try {

@@ -41,7 +41,7 @@ ACTOR Future<Void> triggerddteaminfologCommandActor(Reference<IDatabase> db) {
             tr->set(triggerDDTeamInfoPrintKey, v);
             wait(safeThreadFutureToFuture(tr->commit()));
             printf("Triggered team info logging in data distribution.\n");
-            return Void();
+            return true;
         } catch (Error& e) {
             wait(safeThreadFutureToFuture(tr->onError(e)));
         }

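The surrounding loop/try pattern is the standard fdbcli retry loop: attempt the transaction, and on error let onError() decide whether the failure is retryable before looping again. A self-contained sketch of the control flow (plain exceptions stand in for flow's Error type and ACTOR futures):

    #include <iostream>
    #include <stdexcept>

    // Sketch of the fdbcli command-actor retry loop seen above: try to commit,
    // and on a retryable error back off and loop (the onError() analogue).
    bool runCommandWithRetries(int maxAttempts) {
        for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
            try {
                if (attempt < 3) // pretend the first two commits conflict
                    throw std::runtime_error("not_committed");
                std::cout << "committed on attempt " << attempt << "\n";
                return true; // like the new `return true;` in the diff, success is a bool
            } catch (const std::exception& e) {
                std::cout << "retrying after: " << e.what() << "\n";
            }
        }
        return false; // gave up; a real onError() rethrows non-retryable errors
    }

    int main() {
        return runCommandWithRetries(5) ? 0 : 1;
    }
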
@@ -469,47 +469,6 @@ void initHelp() {
"clear a range of keys from the database",
"All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will "
"succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK);
helpMap["configure"] = CommandHelp(
"configure [new|tss]"
"<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|"
"commit_proxies=<COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*|"
"count=<TSS_COUNT>|perpetual_storage_wiggle=<WIGGLE_SPEED>|storage_migration_type={disabled|gradual|"
"aggressive}",
"change the database configuration",
"The `new' option, if present, initializes a new database with the given configuration rather than changing "
"the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "
"specified.\n\ntss: when enabled, configures the testing storage server for the cluster instead."
"When used with new to set up tss for the first time, it requires both a count and a storage engine."
"To disable the testing storage server, run \"configure tss count=0\"\n\n"
"Redundancy mode:\n  single - one copy of the data. Not fault tolerant.\n  double - two copies "
"of data (survive one failure).\n  triple - three copies of data (survive two failures).\n  three_data_hall - "
"See the Admin Guide.\n  three_datacenter - See the Admin Guide.\n\nStorage engine:\n  ssd - B-Tree storage "
"engine optimized for solid state disks.\n  memory - Durable in-memory storage engine for small "
"datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. The proxy role is being "
"deprecated and split into GRV proxy and Commit proxy, now prefer configure 'grv_proxies' and 'commit_proxies' "
"separately. Generally we should follow that 'commit_proxies' is three times of 'grv_proxies' and "
"'grv_proxies' "
"should be not more than 4. If 'proxies' is specified, it will be converted to 'grv_proxies' and "
"'commit_proxies'. "
"Must be at least 2 (1 GRV proxy, 1 Commit proxy), or set to -1 which restores the number of proxies to the "
"default value.\n\ncommit_proxies=<COMMIT_PROXIES>: Sets the desired number of commit proxies in the cluster. "
"Must be at least 1, or set to -1 which restores the number of commit proxies to the default "
"value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the desired number of GRV proxies in the cluster. Must be at least "
"1, or set to -1 which restores the number of GRV proxies to the default value.\n\nlogs=<LOGS>: Sets the "
"desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of "
"logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. "
"Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\n"
"perpetual_storage_wiggle=<WIGGLE_SPEED>: Set the value speed (a.k.a., the number of processes that the Data "
"Distributor should wiggle at a time). Currently, only 0 and 1 are supported. The value 0 means to disable the "
"perpetual storage wiggle.\n\n"
"See the FoundationDB Administration Guide for more information.");
helpMap["fileconfigure"] = CommandHelp(
"fileconfigure [new] <FILENAME>",
"change the database configuration from a file",
"The `new' option, if present, initializes a new database with the given configuration rather than changing "
"the configuration of an existing one. Load a JSON document from the provided file, and change the database "
"configuration to match the contents of the JSON document. The format should be the same as the value of the "
"\"configuration\" entry in status JSON without \"excluded_servers\" or \"coordinators_count\".");
helpMap["exit"] = CommandHelp("exit", "exit the CLI", "");
helpMap["quit"] = CommandHelp();
helpMap["waitconnected"] = CommandHelp();
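// Editor's note: an illustrative sketch (not part of this diff) of invocations that the
// help text above describes; every token shown is documented in the strings above.
//
//   configure new triple ssd                  -- create a database: redundancy mode + storage engine
//   configure grv_proxies=2 commit_proxies=6  -- follows the ~1:3 grv:commit guidance above
//   configure tss count=0                     -- disable the testing storage server
//   fileconfigure new config.json             -- load the same options from a JSON document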
@@ -672,349 +631,421 @@ ACTOR Future<Void> commitTransaction(Reference<ITransaction> tr) {
return Void();
}

ACTOR Future<bool> configure(Database db,
std::vector<StringRef> tokens,
Reference<ClusterConnectionFile> ccf,
LineNoise* linenoise,
Future<Void> warn) {
state ConfigurationResult result;
state int startToken = 1;
state bool force = false;
if (tokens.size() < 2)
result = ConfigurationResult::NO_OPTIONS_PROVIDED;
else {
if (tokens[startToken] == LiteralStringRef("FORCE")) {
force = true;
startToken = 2;
// FIXME: Factor address parsing from coordinators, include, exclude

ACTOR Future<bool> coordinators(Database db, std::vector<StringRef> tokens, bool isClusterTLS) {
state StringRef setName;
StringRef nameTokenBegin = LiteralStringRef("description=");
for (auto tok = tokens.begin() + 1; tok != tokens.end(); ++tok)
if (tok->startsWith(nameTokenBegin)) {
setName = tok->substr(nameTokenBegin.size());
std::copy(tok + 1, tokens.end(), tok);
tokens.resize(tokens.size() - 1);
break;
}

state Optional<ConfigureAutoResult> conf;
if (tokens[startToken] == LiteralStringRef("auto")) {
StatusObject s = wait(makeInterruptable(StatusClient::statusFetcher(db)));
if (warn.isValid())
warn.cancel();
bool automatic = tokens.size() == 2 && tokens[1] == LiteralStringRef("auto");

conf = parseConfig(s);
state Reference<IQuorumChange> change;
if (tokens.size() == 1 && setName.size()) {
change = noQuorumChange();
} else if (automatic) {
// Automatic quorum change
change = autoQuorumChange();
} else {
state std::set<NetworkAddress> addresses;
state std::vector<StringRef>::iterator t;
for (t = tokens.begin() + 1; t != tokens.end(); ++t) {
try {
// SOMEDAY: Check for keywords
auto const& addr = NetworkAddress::parse(t->toString());
if (addresses.count(addr)) {
fprintf(stderr, "ERROR: passed redundant coordinators: `%s'\n", addr.toString().c_str());
return true;
}
addresses.insert(addr);
} catch (Error& e) {
if (e.code() == error_code_connection_string_invalid) {
fprintf(stderr, "ERROR: '%s' is not a valid network endpoint address\n", t->toString().c_str());
return true;
}
throw;
}
}

if (!conf.get().isValid()) {
printf("Unable to provide advice for the current configuration.\n");
std::vector<NetworkAddress> addressesVec(addresses.begin(), addresses.end());
change = specifiedQuorumChange(addressesVec);
}
if (setName.size())
change = nameQuorumChange(setName.toString(), change);

CoordinatorsResult r = wait(makeInterruptable(changeQuorum(db, change)));

// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
// there are various results specific to changeConfig() that we need to report:
bool err = true;
switch (r) {
case CoordinatorsResult::INVALID_NETWORK_ADDRESSES:
fprintf(stderr, "ERROR: The specified network addresses are invalid\n");
break;
case CoordinatorsResult::SAME_NETWORK_ADDRESSES:
printf("No change (existing configuration satisfies request)\n");
err = false;
break;
case CoordinatorsResult::NOT_COORDINATORS:
fprintf(stderr, "ERROR: Coordination servers are not running on the specified network addresses\n");
break;
case CoordinatorsResult::DATABASE_UNREACHABLE:
fprintf(stderr, "ERROR: Database unreachable\n");
break;
case CoordinatorsResult::BAD_DATABASE_STATE:
fprintf(stderr,
"ERROR: The database is in an unexpected state from which changing coordinators might be unsafe\n");
break;
case CoordinatorsResult::COORDINATOR_UNREACHABLE:
fprintf(stderr, "ERROR: One of the specified coordinators is unreachable\n");
break;
case CoordinatorsResult::SUCCESS:
printf("Coordination state changed\n");
err = false;
break;
case CoordinatorsResult::NOT_ENOUGH_MACHINES:
fprintf(stderr, "ERROR: Too few fdbserver machines to provide coordination at the current redundancy level\n");
break;
default:
ASSERT(false);
};
return err;
}

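// Editor's note: illustrative usage only, inferred from the token parsing above
// (`auto', an explicit address list, and an optional `description=' token); the
// addresses and cluster name are made up:
//
//   coordinators auto
//   coordinators 10.0.0.1:4500 10.0.0.2:4500 10.0.0.3:4500 description=prod_cluster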
// Includes the servers that could be IP addresses or localities back to the cluster.
ACTOR Future<bool> include(Database db, std::vector<StringRef> tokens) {
std::vector<AddressExclusion> addresses;
state std::vector<std::string> localities;
state bool failed = false;
state bool all = false;
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
if (*t == LiteralStringRef("all")) {
all = true;
} else if (*t == LiteralStringRef("failed")) {
failed = true;
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) && t->toString().find(':') != std::string::npos) {
// if the token starts with 'locality_' prefix.
localities.push_back(t->toString());
} else {
auto a = AddressExclusion::parse(*t);
if (!a.isValid()) {
fprintf(stderr,
"ERROR: '%s' is neither a valid network endpoint address nor a locality\n",
t->toString().c_str());
if (t->toString().find(":tls") != std::string::npos)
printf("  Do not include the `:tls' suffix when naming a process\n");
return true;
}
addresses.push_back(a);
}
}
if (all) {
std::vector<AddressExclusion> includeAll;
includeAll.push_back(AddressExclusion());
wait(makeInterruptable(includeServers(db, includeAll, failed)));
wait(makeInterruptable(includeLocalities(db, localities, failed, all)));
} else {
if (!addresses.empty()) {
wait(makeInterruptable(includeServers(db, addresses, failed)));
}
if (!localities.empty()) {
// includes the servers that belong to given localities.
wait(makeInterruptable(includeLocalities(db, localities, failed, all)));
}
}
return false;
};

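// Editor's note: illustrative usage only, inferred from the token handling above
// (`all', `failed', raw addresses, and `locality_<key>:<value>' tokens); the
// address and zone are made up:
//
//   include all                       -- re-include every excluded server
//   include failed all                -- additionally clear `failed' exclusions
//   include 10.0.0.4:4500 locality_zoneid:zone2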
ACTOR Future<bool> exclude(Database db,
std::vector<StringRef> tokens,
Reference<ClusterConnectionFile> ccf,
Future<Void> warn) {
if (tokens.size() <= 1) {
state Future<std::vector<AddressExclusion>> fexclAddresses = makeInterruptable(getExcludedServers(db));
state Future<std::vector<std::string>> fexclLocalities = makeInterruptable(getExcludedLocalities(db));

wait(success(fexclAddresses) && success(fexclLocalities));
std::vector<AddressExclusion> exclAddresses = fexclAddresses.get();
std::vector<std::string> exclLocalities = fexclLocalities.get();

if (!exclAddresses.size() && !exclLocalities.size()) {
printf("There are currently no servers or localities excluded from the database.\n"
"To learn how to exclude a server, type `help exclude'.\n");
return false;
}

printf("There are currently %zu servers or localities being excluded from the database:\n",
exclAddresses.size() + exclLocalities.size());
for (const auto& e : exclAddresses)
printf("  %s\n", e.toString().c_str());
for (const auto& e : exclLocalities)
printf("  %s\n", e.c_str());

printf("To find out whether it is safe to remove one or more of these\n"
"servers from the cluster, type `exclude <addresses>'.\n"
"To return one of these servers to the cluster, type `include <addresses>'.\n");

return false;
} else {
state std::vector<AddressExclusion> exclusionVector;
state std::set<AddressExclusion> exclusionSet;
state std::vector<AddressExclusion> exclusionAddresses;
state std::unordered_set<std::string> exclusionLocalities;
state std::vector<std::string> noMatchLocalities;
state bool force = false;
state bool waitForAllExcluded = true;
state bool markFailed = false;
state std::vector<ProcessData> workers = wait(makeInterruptable(getWorkers(db)));
for (auto t = tokens.begin() + 1; t != tokens.end(); ++t) {
if (*t == LiteralStringRef("FORCE")) {
force = true;
} else if (*t == LiteralStringRef("no_wait")) {
waitForAllExcluded = false;
} else if (*t == LiteralStringRef("failed")) {
markFailed = true;
} else if (t->startsWith(LocalityData::ExcludeLocalityPrefix) &&
t->toString().find(':') != std::string::npos) {
std::set<AddressExclusion> localityAddresses = getAddressesByLocality(workers, t->toString());
if (localityAddresses.empty()) {
noMatchLocalities.push_back(t->toString());
} else {
// add all the server ipaddresses that belong to the given localities to the exclusionSet.
exclusionVector.insert(exclusionVector.end(), localityAddresses.begin(), localityAddresses.end());
exclusionSet.insert(localityAddresses.begin(), localityAddresses.end());
}
exclusionLocalities.insert(t->toString());
} else {
auto a = AddressExclusion::parse(*t);
if (!a.isValid()) {
fprintf(stderr,
"ERROR: '%s' is neither a valid network endpoint address nor a locality\n",
t->toString().c_str());
if (t->toString().find(":tls") != std::string::npos)
printf("  Do not include the `:tls' suffix when naming a process\n");
return true;
}
exclusionVector.push_back(a);
exclusionSet.insert(a);
exclusionAddresses.push_back(a);
}
}

if (exclusionAddresses.empty() && exclusionLocalities.empty()) {
fprintf(stderr, "ERROR: At least one valid network endpoint address or a locality is not provided\n");
return true;
}

if (!force) {
if (markFailed) {
state bool safe;
try {
bool _safe = wait(makeInterruptable(checkSafeExclusions(db, exclusionVector)));
safe = _safe;
} catch (Error& e) {
if (e.code() == error_code_actor_cancelled)
throw;
TraceEvent("CheckSafeExclusionsError").error(e);
safe = false;
}
if (!safe) {
std::string errorStr =
"ERROR: It is unsafe to exclude the specified servers at this time.\n"
"Please check that this exclusion does not bring down an entire storage team.\n"
"Please also ensure that the exclusion will keep a majority of coordinators alive.\n"
"You may add more storage processes or coordinators to make the operation safe.\n"
"Type `exclude FORCE failed <ADDRESS...>' to exclude without performing safety checks.\n";
printf("%s", errorStr.c_str());
return true;
}
}
StatusObject status = wait(makeInterruptable(StatusClient::statusFetcher(db)));

state std::string errorString =
"ERROR: Could not calculate the impact of this exclude on the total free space in the cluster.\n"
"Please try the exclude again in 30 seconds.\n"
"Type `exclude FORCE <ADDRESS...>' to exclude without checking free space.\n";

StatusObjectReader statusObj(status);

StatusObjectReader statusObjCluster;
if (!statusObj.get("cluster", statusObjCluster)) {
fprintf(stderr, "%s", errorString.c_str());
return true;
}

bool noChanges = conf.get().old_replication == conf.get().auto_replication &&
conf.get().old_logs == conf.get().auto_logs &&
conf.get().old_commit_proxies == conf.get().auto_commit_proxies &&
conf.get().old_grv_proxies == conf.get().auto_grv_proxies &&
conf.get().old_resolvers == conf.get().auto_resolvers &&
conf.get().old_processes_with_transaction == conf.get().auto_processes_with_transaction &&
conf.get().old_machines_with_transaction == conf.get().auto_machines_with_transaction;
StatusObjectReader processesMap;
if (!statusObjCluster.get("processes", processesMap)) {
fprintf(stderr, "%s", errorString.c_str());
return true;
}

bool noDesiredChanges = noChanges && conf.get().old_logs == conf.get().desired_logs &&
conf.get().old_commit_proxies == conf.get().desired_commit_proxies &&
conf.get().old_grv_proxies == conf.get().desired_grv_proxies &&
conf.get().old_resolvers == conf.get().desired_resolvers;
state int ssTotalCount = 0;
state int ssExcludedCount = 0;
state double worstFreeSpaceRatio = 1.0;
try {
for (auto proc : processesMap.obj()) {
bool storageServer = false;
StatusArray rolesArray = proc.second.get_obj()["roles"].get_array();
for (StatusObjectReader role : rolesArray) {
if (role["role"].get_str() == "storage") {
storageServer = true;
break;
}
}
// Skip non-storage servers in free space calculation
if (!storageServer)
continue;

std::string outputString;
StatusObjectReader process(proc.second);
std::string addrStr;
if (!process.get("address", addrStr)) {
fprintf(stderr, "%s", errorString.c_str());
return true;
}
NetworkAddress addr = NetworkAddress::parse(addrStr);
bool excluded =
(process.has("excluded") && process.last().get_bool()) || addressExcluded(exclusionSet, addr);
ssTotalCount++;
if (excluded)
ssExcludedCount++;

outputString += "\nYour cluster has:\n\n";
outputString += format("  processes %d\n", conf.get().processes);
outputString += format("  machines  %d\n", conf.get().machines);
if (!excluded) {
StatusObjectReader disk;
if (!process.get("disk", disk)) {
fprintf(stderr, "%s", errorString.c_str());
return true;
}

if (noDesiredChanges)
outputString += "\nConfigure recommends keeping your current configuration:\n\n";
else if (noChanges)
outputString +=
"\nConfigure cannot modify the configuration because some parameters have been set manually:\n\n";
else
outputString += "\nConfigure recommends the following changes:\n\n";
outputString += " ------------------------------------------------------------------- \n";
outputString += "| parameter                   | old              | new              |\n";
outputString += " ------------------------------------------------------------------- \n";
outputString += format("| replication                 | %16s | %16s |\n",
conf.get().old_replication.c_str(),
conf.get().auto_replication.c_str());
outputString +=
format("| logs                        | %16d | %16d |", conf.get().old_logs, conf.get().auto_logs);
outputString += conf.get().auto_logs != conf.get().desired_logs
? format(" (manually set; would be %d)\n", conf.get().desired_logs)
: "\n";
outputString += format("| commit_proxies              | %16d | %16d |",
conf.get().old_commit_proxies,
conf.get().auto_commit_proxies);
outputString += conf.get().auto_commit_proxies != conf.get().desired_commit_proxies
? format(" (manually set; would be %d)\n", conf.get().desired_commit_proxies)
: "\n";
outputString += format("| grv_proxies                 | %16d | %16d |",
conf.get().old_grv_proxies,
conf.get().auto_grv_proxies);
outputString += conf.get().auto_grv_proxies != conf.get().desired_grv_proxies
? format(" (manually set; would be %d)\n", conf.get().desired_grv_proxies)
: "\n";
outputString += format(
"| resolvers                   | %16d | %16d |", conf.get().old_resolvers, conf.get().auto_resolvers);
outputString += conf.get().auto_resolvers != conf.get().desired_resolvers
? format(" (manually set; would be %d)\n", conf.get().desired_resolvers)
: "\n";
outputString += format("| transaction-class processes | %16d | %16d |\n",
conf.get().old_processes_with_transaction,
conf.get().auto_processes_with_transaction);
outputString += format("| transaction-class machines  | %16d | %16d |\n",
conf.get().old_machines_with_transaction,
conf.get().auto_machines_with_transaction);
outputString += " ------------------------------------------------------------------- \n\n";
int64_t total_bytes;
if (!disk.get("total_bytes", total_bytes)) {
fprintf(stderr, "%s", errorString.c_str());
return true;
}

std::printf("%s", outputString.c_str());
int64_t free_bytes;
if (!disk.get("free_bytes", free_bytes)) {
fprintf(stderr, "%s", errorString.c_str());
return true;
}

if (noChanges)
return false;
worstFreeSpaceRatio = std::min(worstFreeSpaceRatio, double(free_bytes) / total_bytes);
}
}
} catch (...) // std::exception
{
fprintf(stderr, "%s", errorString.c_str());
return true;
}

// TODO: disable completion
Optional<std::string> line = wait(linenoise->read("Would you like to make these changes? [y/n]> "));

if (!line.present() || (line.get() != "y" && line.get() != "Y")) {
return false;
if (ssExcludedCount == ssTotalCount ||
(1 - worstFreeSpaceRatio) * ssTotalCount / (ssTotalCount - ssExcludedCount) > 0.9) {
fprintf(stderr,
"ERROR: This exclude may cause the total free space in the cluster to drop below 10%%.\n"
"Type `exclude FORCE <ADDRESS...>' to exclude without checking free space.\n");
return true;
}
}

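// Editor's note (worked example of the free-space check above, illustrative numbers):
// with ssTotalCount = 10 storage servers, ssExcludedCount = 2, and
// worstFreeSpaceRatio = 0.25, the estimate is (1 - 0.25) * 10 / (10 - 2) = 0.9375 > 0.9,
// so the exclude is refused and the user is told to rerun with `exclude FORCE <ADDRESS...>'.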
ConfigurationResult r = wait(makeInterruptable(
changeConfig(db, std::vector<StringRef>(tokens.begin() + startToken, tokens.end()), conf, force)));
result = r;
}

// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
// there are various results specific to changeConfig() that we need to report:
bool ret;
switch (result) {
case ConfigurationResult::NO_OPTIONS_PROVIDED:
case ConfigurationResult::CONFLICTING_OPTIONS:
case ConfigurationResult::UNKNOWN_OPTION:
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
printUsage(LiteralStringRef("configure"));
ret = true;
break;
case ConfigurationResult::INVALID_CONFIGURATION:
fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
ret = true;
break;
case ConfigurationResult::DATABASE_ALREADY_CREATED:
fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
ret = true;
break;
case ConfigurationResult::DATABASE_CREATED:
printf("Database created\n");
ret = false;
break;
case ConfigurationResult::DATABASE_UNAVAILABLE:
fprintf(stderr, "ERROR: The database is unavailable\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
fprintf(stderr,
"ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated "
"before changing the configuration\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::REGIONS_CHANGED:
fprintf(stderr,
"ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::NOT_ENOUGH_WORKERS:
fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::REGION_REPLICATION_MISMATCH:
fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::DCID_MISSING:
fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
fprintf(stderr, "Type `configure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::SUCCESS:
printf("Configuration changed\n");
ret = false;
break;
case ConfigurationResult::LOCKED_NOT_NEW:
fprintf(stderr, "ERROR: `only new databases can be configured as locked`\n");
ret = false;
break;
case ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL:
printf("Configuration changed, with warnings\n");
fprintf(stderr,
"WARN: To make progress toward the desired storage type with storage_migration_type=gradual, the "
"Perpetual Wiggle must be enabled.\n");
fprintf(stderr,
"Type `configure perpetual_storage_wiggle=1' to enable the perpetual wiggle, or `configure "
"storage_migration_type=gradual' to set the gradual migration type.\n");
ret = true;
break;
case ConfigurationResult::SUCCESS_WARN_CHANGE_STORAGE_NOMIGRATE:
printf("Configuration changed, with warnings\n");
fprintf(stderr,
"WARN: Storage engine type changed, but nothing will be migrated because "
"storage_migration_mode=disabled.\n");
fprintf(stderr,
"Type `configure perpetual_storage_wiggle=1 storage_migration_type=gradual' to enable gradual "
"migration with the perpetual wiggle, or `configure "
"storage_migration_type=aggressive' for aggressive migration.\n");
ret = true;
break;
default:
ASSERT(false);
ret = true;
};
return ret;
}

ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDatabase, bool force) {
std::string contents(readFileBytes(filePath, 100000));
json_spirit::mValue config;
if (!json_spirit::read_string(contents, config)) {
fprintf(stderr, "ERROR: Invalid JSON\n");
return true;
}
if (config.type() != json_spirit::obj_type) {
fprintf(stderr, "ERROR: Configuration file must contain a JSON object\n");
return true;
}
StatusObject configJSON = config.get_obj();

json_spirit::mValue schema;
if (!json_spirit::read_string(JSONSchemas::clusterConfigurationSchema.toString(), schema)) {
ASSERT(false);
}

std::string errorStr;
if (!schemaMatch(schema.get_obj(), configJSON, errorStr)) {
printf("%s", errorStr.c_str());
return true;
}

std::string configString;
if (isNewDatabase) {
configString = "new";
}

for (const auto& [name, value] : configJSON) {
if (!configString.empty()) {
configString += " ";
if (!exclusionAddresses.empty()) {
wait(makeInterruptable(excludeServers(db, exclusionAddresses, markFailed)));
}
if (value.type() == json_spirit::int_type) {
configString += name + ":=" + format("%d", value.get_int());
} else if (value.type() == json_spirit::str_type) {
configString += value.get_str();
} else if (value.type() == json_spirit::array_type) {
configString +=
name + "=" +
json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
} else {
printUsage(LiteralStringRef("fileconfigure"));
return true;
if (!exclusionLocalities.empty()) {
wait(makeInterruptable(excludeLocalities(db, exclusionLocalities, markFailed)));
}
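// Editor's note: a hypothetical input/output pair for the fileconfigure conversion loop
// above (the file contents are made up): given
//   { "redundancy_mode": "triple", "storage_engine": "ssd", "logs": 8 }
// string values are appended verbatim and integers as `name:=value', so with the `new'
// option the generated string resembles "new triple ssd logs:=8" (map iteration order
// determines the actual token order).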
if (waitForAllExcluded) {
printf("Waiting for state to be removed from all excluded servers. This may take a while.\n");
printf("(Interrupting this wait with CTRL+C will not cancel the data movement.)\n");
}

if (warn.isValid())
warn.cancel();

state std::set<NetworkAddress> notExcludedServers =
wait(makeInterruptable(checkForExcludingServers(db, exclusionVector, waitForAllExcluded)));
std::map<IPAddress, std::set<uint16_t>> workerPorts;
for (auto addr : workers)
workerPorts[addr.address.ip].insert(addr.address.port);

// Print a list of all excluded addresses that don't have a corresponding worker
std::set<AddressExclusion> absentExclusions;
for (const auto& addr : exclusionVector) {
auto worker = workerPorts.find(addr.ip);
if (worker == workerPorts.end())
absentExclusions.insert(addr);
else if (addr.port > 0 && worker->second.count(addr.port) == 0)
absentExclusions.insert(addr);
}

for (const auto& exclusion : exclusionVector) {
if (absentExclusions.find(exclusion) != absentExclusions.end()) {
if (exclusion.port == 0) {
fprintf(stderr,
"  %s(Whole machine)  ---- WARNING: Missing from cluster!Be sure that you excluded the "
"correct machines before removing them from the cluster!\n",
exclusion.ip.toString().c_str());
} else {
fprintf(stderr,
"  %s  ---- WARNING: Missing from cluster! Be sure that you excluded the correct processes "
"before removing them from the cluster!\n",
exclusion.toString().c_str());
}
} else if (std::any_of(notExcludedServers.begin(), notExcludedServers.end(), [&](const NetworkAddress& a) {
return addressExcluded({ exclusion }, a);
})) {
if (exclusion.port == 0) {
fprintf(stderr,
"  %s(Whole machine)  ---- WARNING: Exclusion in progress! It is not safe to remove this "
"machine from the cluster\n",
exclusion.ip.toString().c_str());
} else {
fprintf(stderr,
"  %s  ---- WARNING: Exclusion in progress! It is not safe to remove this process from the "
"cluster\n",
exclusion.toString().c_str());
}
} else {
if (exclusion.port == 0) {
printf("  %s(Whole machine)  ---- Successfully excluded. It is now safe to remove this machine "
"from the cluster.\n",
exclusion.ip.toString().c_str());
} else {
printf(
"  %s  ---- Successfully excluded. It is now safe to remove this process from the cluster.\n",
exclusion.toString().c_str());
}
}
}

for (const auto& locality : noMatchLocalities) {
fprintf(
stderr,
"  %s ---- WARNING: Currently no servers found with this locality match! Be sure that you excluded "
"the correct locality.\n",
locality.c_str());
}

bool foundCoordinator = false;
auto ccs = ClusterConnectionFile(ccf->getFilename()).getConnectionString();
for (const auto& c : ccs.coordinators()) {
if (std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip, c.port)) ||
std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip))) {
fprintf(stderr, "WARNING: %s is a coordinator!\n", c.toString().c_str());
foundCoordinator = true;
}
}
if (foundCoordinator)
printf("Type `help coordinators' for information on how to change the\n"
"cluster's coordination servers before removing them.\n");

return false;
}
ConfigurationResult result = wait(makeInterruptable(changeConfig(db, configString, force)));
// Real errors get thrown from makeInterruptable and printed by the catch block in cli(), but
// there are various results specific to changeConfig() that we need to report:
bool ret;
switch (result) {
case ConfigurationResult::NO_OPTIONS_PROVIDED:
fprintf(stderr, "ERROR: No options provided\n");
ret = true;
break;
case ConfigurationResult::CONFLICTING_OPTIONS:
fprintf(stderr, "ERROR: Conflicting options\n");
ret = true;
break;
case ConfigurationResult::UNKNOWN_OPTION:
fprintf(stderr, "ERROR: Unknown option\n"); // This should not be possible because of schema match
ret = true;
break;
case ConfigurationResult::INCOMPLETE_CONFIGURATION:
fprintf(stderr,
"ERROR: Must specify both a replication level and a storage engine when creating a new database\n");
ret = true;
break;
case ConfigurationResult::INVALID_CONFIGURATION:
fprintf(stderr, "ERROR: These changes would make the configuration invalid\n");
ret = true;
break;
case ConfigurationResult::DATABASE_ALREADY_CREATED:
fprintf(stderr, "ERROR: Database already exists! To change configuration, don't say `new'\n");
ret = true;
break;
case ConfigurationResult::DATABASE_CREATED:
printf("Database created\n");
ret = false;
break;
case ConfigurationResult::DATABASE_UNAVAILABLE:
fprintf(stderr, "ERROR: The database is unavailable\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
fprintf(stderr, "ERROR: All storage servers must be in one of the known regions\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
fprintf(stderr,
"ERROR: When usable_regions > 1, All regions with priority >= 0 must be fully replicated "
"before changing the configuration\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
fprintf(stderr, "ERROR: When changing usable_regions, only one region can have priority >= 0\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::REGIONS_CHANGED:
fprintf(stderr,
"ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::NOT_ENOUGH_WORKERS:
fprintf(stderr, "ERROR: Not enough processes exist to support the specified configuration\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::REGION_REPLICATION_MISMATCH:
fprintf(stderr, "ERROR: `three_datacenter' replication is incompatible with region configuration\n");
printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::DCID_MISSING:
fprintf(stderr, "ERROR: `No storage servers in one of the specified regions\n");
printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
ret = true;
break;
case ConfigurationResult::SUCCESS:
printf("Configuration changed\n");
ret = false;
break;
default:
ASSERT(false);
ret = true;
};
return ret;
}

ACTOR Future<bool> createSnapshot(Database db, std::vector<StringRef> tokens) {

@@ -1720,7 +1751,8 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}

if (tokencmp(tokens[0], "waitopen")) {
wait(success(safeThreadFutureToFuture(getTransaction(db, tr, options, intrans)->getReadVersion())));
wait(makeInterruptable(
success(safeThreadFutureToFuture(getTransaction(db, tr, options, intrans)->getReadVersion()))));
continue;
}

@@ -1752,7 +1784,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}

if (tokencmp(tokens[0], "triggerddteaminfolog")) {
wait(triggerddteaminfologCommandActor(db));
wait(success(makeInterruptable(triggerddteaminfologCommandActor(db))));
continue;
}

@@ -1764,8 +1796,9 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}

if (tokencmp(tokens[0], "configure")) {
bool err = wait(configure(localDb, tokens, localDb->getConnectionFile(), &linenoise, warn));
if (err)
bool _result =
wait(makeInterruptable(configureCommandActor(db, localDb, tokens, &linenoise, warn)));
if (!_result)
is_error = true;
continue;
}

@@ -1773,11 +1806,12 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
if (tokencmp(tokens[0], "fileconfigure")) {
if (tokens.size() == 2 || (tokens.size() == 3 && (tokens[1] == LiteralStringRef("new") ||
tokens[1] == LiteralStringRef("FORCE")))) {
bool err = wait(fileConfigure(localDb,
tokens.back().toString(),
tokens[1] == LiteralStringRef("new"),
tokens[1] == LiteralStringRef("FORCE")));
if (err)
bool _result =
wait(makeInterruptable(fileConfigureCommandActor(db,
tokens.back().toString(),
tokens[1] == LiteralStringRef("new"),
tokens[1] == LiteralStringRef("FORCE"))));
if (!_result)
is_error = true;
} else {
printUsage(tokens[0]);

@@ -1808,14 +1842,14 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}

if (tokencmp(tokens[0], "snapshot")) {
bool _result = wait(snapshotCommandActor(db, tokens));
bool _result = wait(makeInterruptable(snapshotCommandActor(db, tokens)));
if (!_result)
is_error = true;
continue;
}

if (tokencmp(tokens[0], "lock")) {
bool _result = wait(lockCommandActor(db, tokens));
bool _result = wait(makeInterruptable(lockCommandActor(db, tokens)));
if (!_result)
is_error = true;
continue;

@@ -2227,7 +2261,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}

if (tokencmp(tokens[0], "throttle")) {
bool _result = wait(throttleCommandActor(db, tokens));
bool _result = wait(makeInterruptable(throttleCommandActor(db, tokens)));
if (!_result)
is_error = true;
continue;

@@ -28,6 +28,8 @@
#elif !defined(FDBCLI_FDBCLI_ACTOR_H)
#define FDBCLI_FDBCLI_ACTOR_H

#include "fdbcli/FlowLineNoise.h"

#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/StatusClient.h"

@@ -119,10 +121,17 @@ void printStatus(StatusObjectReader statusObj,
bool hideErrorMessages = false);

// All fdbcli commands (alphabetically)
// All below actors return true if the command is executed successfully
// advanceversion command
ACTOR Future<bool> advanceVersionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// cache_range command
ACTOR Future<bool> cacheRangeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// configure command
ACTOR Future<bool> configureCommandActor(Reference<IDatabase> db,
Database localDb,
std::vector<StringRef> tokens,
LineNoise* linenoise,
Future<Void> warn);
// consistency command
ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
std::vector<StringRef> tokens,

@@ -139,6 +148,11 @@ ACTOR Future<bool> expensiveDataCheckCommandActor(
Reference<ITransaction> tr,
std::vector<StringRef> tokens,
std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// fileconfigure command
ACTOR Future<bool> fileConfigureCommandActor(Reference<IDatabase> db,
std::string filePath,
bool isNewDatabase,
bool force);
// force_recovery_with_data_loss command
ACTOR Future<bool> forceRecoveryWithDataLossCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// include command

@@ -176,7 +190,7 @@ ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
// throttle command
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// triggerteaminfolog command
ACTOR Future<Void> triggerddteaminfologCommandActor(Reference<IDatabase> db);
ACTOR Future<bool> triggerddteaminfologCommandActor(Reference<IDatabase> db);
// tssq command
ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);

@@ -127,6 +127,7 @@ set(FDBCLIENT_SRCS
VersionedMap.actor.h
VersionedMap.h
VersionedMap.cpp
WellKnownEndpoints.h
WriteMap.h
json_spirit/json_spirit_error_position.h
json_spirit/json_spirit_reader_template.h

@@ -182,14 +183,14 @@ if(BUILD_AZURE_BACKUP)
endif()

add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
add_dependencies(fdbclient fdboptions fdb_c_options)
add_dependencies(fdbclient fdboptions)
target_link_libraries(fdbclient PUBLIC fdbrpc msgpack)

# Create a separate fdbclient library with sampling enabled. This lets
# fdbserver retain sampling functionality in client code while disabling
# sampling for pure clients.
add_flow_target(STATIC_LIBRARY NAME fdbclient_sampling SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
add_dependencies(fdbclient_sampling fdboptions fdb_c_options)
add_dependencies(fdbclient_sampling fdboptions)
target_link_libraries(fdbclient_sampling PUBLIC fdbrpc_sampling msgpack)
target_compile_definitions(fdbclient_sampling PRIVATE -DENABLE_SAMPLING)
if(WIN32)

@@ -267,7 +267,7 @@ struct StatusRequest {

struct GetClientWorkersRequest {
constexpr static FileIdentifier file_identifier = 10771791;
ReplyPromise<vector<ClientWorkerInterface>> reply;
ReplyPromise<std::vector<ClientWorkerInterface>> reply;

GetClientWorkersRequest() {}

@@ -109,12 +109,12 @@ struct CommitProxyInterface {
struct ClientDBInfo {
constexpr static FileIdentifier file_identifier = 5355080;
UID id; // Changes each time anything else changes
vector<GrvProxyInterface> grvProxies;
vector<CommitProxyInterface> commitProxies;
std::vector<GrvProxyInterface> grvProxies;
std::vector<CommitProxyInterface> commitProxies;
Optional<CommitProxyInterface>
firstCommitProxy; // not serialized, used for commitOnFirstProxy when the commit proxies vector has been shrunk
Optional<Value> forward;
vector<VersionHistory> history;
std::vector<VersionHistory> history;

ClientDBInfo() {}

@@ -285,7 +285,7 @@ struct GetReadVersionRequest : TimedRequest {
struct GetKeyServerLocationsReply {
constexpr static FileIdentifier file_identifier = 10636023;
Arena arena;
std::vector<std::pair<KeyRangeRef, vector<StorageServerInterface>>> results;
std::vector<std::pair<KeyRangeRef, std::vector<StorageServerInterface>>> results;

// if any storage servers in results have a TSS pair, that mapping is in here
std::vector<std::pair<UID, StorageServerInterface>> resultsTssMapping;

@@ -499,11 +499,11 @@ struct ExclusionSafetyCheckReply {

struct ExclusionSafetyCheckRequest {
constexpr static FileIdentifier file_identifier = 13852702;
vector<AddressExclusion> exclusions;
std::vector<AddressExclusion> exclusions;
ReplyPromise<ExclusionSafetyCheckReply> reply;

ExclusionSafetyCheckRequest() {}
explicit ExclusionSafetyCheckRequest(vector<AddressExclusion> exclusions) : exclusions(exclusions) {}
explicit ExclusionSafetyCheckRequest(std::vector<AddressExclusion> exclusions) : exclusions(exclusions) {}

template <class Ar>
void serialize(Ar& ar) {

@@ -34,10 +34,11 @@ void ConfigTransactionInterface::setupWellKnownEndpoints() {
}

ConfigTransactionInterface::ConfigTransactionInterface(NetworkAddress const& remote)
: getGeneration(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETGENERATION)),
get(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GET)), getClasses(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETCLASSES)),
getKnobs(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETKNOBS)), commit(Endpoint({ remote }, WLTOKEN_CONFIGTXN_COMMIT)) {
}
: getGeneration(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GETGENERATION)),
get(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GET)),
getClasses(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GETCLASSES)),
getKnobs(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_GETKNOBS)),
commit(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGTXN_COMMIT)) {}

bool ConfigTransactionInterface::operator==(ConfigTransactionInterface const& rhs) const {
return _id == rhs._id;

@@ -27,23 +27,10 @@
#include "fdbrpc/Locality.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/ClusterInterface.h"
#include "fdbclient/WellKnownEndpoints.h"

const int MAX_CLUSTER_FILE_BYTES = 60000;

// well known endpoints published to the client.
constexpr UID WLTOKEN_CLIENTLEADERREG_GETLEADER(-1, 2);
constexpr UID WLTOKEN_CLIENTLEADERREG_OPENDATABASE(-1, 3);

// the value of this endpoint should be stable and not change.
constexpr UID WLTOKEN_PROTOCOL_INFO(-1, 10);
constexpr UID WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE(-1, 11);

constexpr UID WLTOKEN_CONFIGTXN_GETGENERATION(-1, 12);
constexpr UID WLTOKEN_CONFIGTXN_GET(-1, 13);
constexpr UID WLTOKEN_CONFIGTXN_GETCLASSES(-1, 14);
constexpr UID WLTOKEN_CONFIGTXN_GETKNOBS(-1, 15);
constexpr UID WLTOKEN_CONFIGTXN_COMMIT(-1, 16);

struct ClientLeaderRegInterface {
RequestStream<struct GetLeaderRequest> getLeader;
RequestStream<struct OpenDatabaseCoordRequest> openDatabase;

@@ -62,8 +49,8 @@ class ClusterConnectionString {
public:
ClusterConnectionString() {}
ClusterConnectionString(std::string const& connectionString);
ClusterConnectionString(vector<NetworkAddress>, Key);
vector<NetworkAddress> const& coordinators() const { return coord; }
ClusterConnectionString(std::vector<NetworkAddress>, Key);
std::vector<NetworkAddress> const& coordinators() const { return coord; }
Key clusterKey() const { return key; }
Key clusterKeyName() const {
return keyDesc;

@@ -74,7 +61,7 @@ public:
private:
void parseKey(std::string const& key);

vector<NetworkAddress> coord;
std::vector<NetworkAddress> coord;
Key key, keyDesc;
};

@@ -199,7 +186,7 @@ struct OpenDatabaseCoordRequest {
Standalone<VectorRef<ClientVersionRef>> supportedVersions;
UID knownClientInfoID;
Key clusterKey;
vector<NetworkAddress> coordinators;
std::vector<NetworkAddress> coordinators;
ReplyPromise<CachedSerialization<struct ClientDBInfo>> reply;

template <class Ar>

@@ -210,7 +197,7 @@ struct OpenDatabaseCoordRequest {

class ClientCoordinators {
public:
vector<ClientLeaderRegInterface> clientLeaderServers;
std::vector<ClientLeaderRegInterface> clientLeaderServers;
Key clusterKey;
Reference<ClusterConnectionFile> ccf;

@@ -199,8 +199,8 @@ bool DatabaseConfiguration::isValid() const {
(usableRegions == 1 || regions.size() == 2) && (regions.size() == 0 || regions[0].priority >= 0) &&
(regions.size() == 0 || tLogPolicy->info() != "dcid^2 x zoneid^2 x 1") &&
// We cannot specify regions with three_datacenter replication
(perpetualStorageWiggleSpeed == 0 || perpetualStorageWiggleSpeed == 1)) &&
storageMigrationType != StorageMigrationType::UNSET) {
(perpetualStorageWiggleSpeed == 0 || perpetualStorageWiggleSpeed == 1) &&
storageMigrationType != StorageMigrationType::UNSET)) {
return false;
}
std::set<Key> dcIds;

@@ -182,10 +182,10 @@ public:

std::pair<KeyRange, Reference<LocationInfo>> getCachedLocation(const KeyRef&, Reverse isBackward = Reverse::False);
bool getCachedLocations(const KeyRangeRef&,
vector<std::pair<KeyRange, Reference<LocationInfo>>>&,
std::vector<std::pair<KeyRange, Reference<LocationInfo>>>&,
int limit,
Reverse reverse);
Reference<LocationInfo> setCachedLocation(const KeyRangeRef&, const vector<struct StorageServerInterface>&);
Reference<LocationInfo> setCachedLocation(const KeyRangeRef&, const std::vector<struct StorageServerInterface>&);
void invalidateCache(const KeyRef&, Reverse isBackward = Reverse::False);
void invalidateCache(const KeyRangeRef&);

@@ -42,15 +42,20 @@ struct FDBOptionInfo {
// be no cumulative effects from calling multiple times).
int defaultFor;

enum class ParamType { None, String, Int, Bytes };

ParamType paramType;

FDBOptionInfo(std::string name,
std::string comment,
std::string parameterComment,
bool hasParameter,
bool hidden,
bool persistent,
int defaultFor)
int defaultFor,
ParamType paramType)
: name(name), comment(comment), parameterComment(parameterComment), hasParameter(hasParameter), hidden(hidden),
persistent(persistent), defaultFor(defaultFor) {}
persistent(persistent), defaultFor(defaultFor), paramType(paramType) {}

FDBOptionInfo() {}
};

@@ -103,8 +108,9 @@ public:
typename OptionList::const_iterator end() const { return options.cend(); }
};

#define ADD_OPTION_INFO(type, var, name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor) \
#define ADD_OPTION_INFO( \
type, var, name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor, paramType) \
type::optionInfo.insert( \
var, FDBOptionInfo(name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor)); \
var, FDBOptionInfo(name, comment, parameterComment, hasParameter, hidden, persistent, defaultFor, paramType));

#endif
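// Editor's note: a hypothetical invocation of the extended macro (the option shown is
// made up for illustration; only the parameter list comes from the signature above):
//
//   ADD_OPTION_INFO(FDBTransactionOptions, TIMEOUT, "TIMEOUT",
//                   "Set a timeout in milliseconds",
//                   "(Int) timeout in milliseconds",
//                   true /*hasParameter*/, false /*hidden*/, false /*persistent*/,
//                   -1 /*defaultFor*/, FDBOptionInfo::ParamType::Int)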
@@ -2743,7 +2743,7 @@ struct StartFullBackupTaskFunc : BackupTaskFuncBase {
if (!backupWorkerEnabled && partitionedLog.get().present() && partitionedLog.get().get()) {
// Change configuration only when we set to use partitioned logs and
// the flag was not set before.
wait(success(changeConfig(cx, "backup_worker_enabled:=1", true)));
wait(success(ManagementAPI::changeConfig(cx.getReference(), "backup_worker_enabled:=1", true)));
backupWorkerEnabled = true;
}

@@ -25,7 +25,7 @@
#include "fdbclient/ReadYourWrites.h"
#include "flow/actorcompiler.h" // has to be last include

void KeyRangeActorMap::getRangesAffectedByInsertion(const KeyRangeRef& keys, vector<KeyRange>& affectedRanges) {
void KeyRangeActorMap::getRangesAffectedByInsertion(const KeyRangeRef& keys, std::vector<KeyRange>& affectedRanges) {
auto s = map.rangeContaining(keys.begin);
if (s.begin() != keys.begin && s.value().isValid() && !s.value().isReady())
affectedRanges.push_back(KeyRangeRef(s.begin(), keys.begin));

@@ -176,7 +176,7 @@ static Future<Void> krmSetRangeCoalescing_(Transaction* tr,
state KeyRange maxWithPrefix =
KeyRangeRef(mapPrefix.toString() + maxRange.begin.toString(), mapPrefix.toString() + maxRange.end.toString());

state vector<Future<RangeResult>> keys;
state std::vector<Future<RangeResult>> keys;
keys.push_back(
tr->getRange(lastLessThan(withPrefix.begin), firstGreaterOrEqual(withPrefix.begin), 1, Snapshot::True));
keys.push_back(

@@ -111,7 +111,7 @@ public:

class KeyRangeActorMap {
public:
void getRangesAffectedByInsertion(const KeyRangeRef& keys, vector<KeyRange>& affectedRanges);
void getRangesAffectedByInsertion(const KeyRangeRef& keys, std::vector<KeyRange>& affectedRanges);
void insert(const KeyRangeRef& keys, const Future<Void>& value) { map.insert(keys, value); }
void cancel(const KeyRangeRef& keys) { insert(keys, Future<Void>()); }
bool liveActorAt(const KeyRef& key) {

@@ -427,285 +427,6 @@ ACTOR Future<DatabaseConfiguration> getDatabaseConfiguration(Database cx) {
}
}

ACTOR Future<ConfigurationResult> changeConfig(Database cx, std::map<std::string, std::string> m, bool force) {
state StringRef initIdKey = LiteralStringRef("\xff/init_id");
state Transaction tr(cx);

if (!m.size()) {
return ConfigurationResult::NO_OPTIONS_PROVIDED;
}

// make sure we have essential configuration options
std::string initKey = configKeysPrefix.toString() + "initialized";
state bool creating = m.count(initKey) != 0;
state Optional<UID> locked;
{
auto iter = m.find(databaseLockedKey.toString());
if (iter != m.end()) {
if (!creating) {
return ConfigurationResult::LOCKED_NOT_NEW;
}
locked = UID::fromString(iter->second);
m.erase(iter);
}
}
if (creating) {
m[initIdKey.toString()] = deterministicRandom()->randomUniqueID().toString();
if (!isCompleteConfiguration(m)) {
return ConfigurationResult::INCOMPLETE_CONFIGURATION;
}
}

state Future<Void> tooLong = delay(60);
state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
state bool oldReplicationUsesDcId = false;
state bool warnPPWGradual = false;
state bool warnChangeStorageNoMigrate = false;
loop {
try {
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);

if (!creating && !force) {
state Future<RangeResult> fConfig = tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY);
state Future<vector<ProcessData>> fWorkers = getWorkers(&tr);
wait(success(fConfig) || tooLong);

if (!fConfig.isReady()) {
return ConfigurationResult::DATABASE_UNAVAILABLE;
}

if (fConfig.isReady()) {
ASSERT(fConfig.get().size() < CLIENT_KNOBS->TOO_MANY);
state DatabaseConfiguration oldConfig;
oldConfig.fromKeyValues((VectorRef<KeyValueRef>)fConfig.get());
state DatabaseConfiguration newConfig = oldConfig;
for (auto kv : m) {
newConfig.set(kv.first, kv.second);
}
if (!newConfig.isValid()) {
return ConfigurationResult::INVALID_CONFIGURATION;
}

if (newConfig.tLogPolicy->attributeKeys().count("dcid") && newConfig.regions.size() > 0) {
return ConfigurationResult::REGION_REPLICATION_MISMATCH;
}

oldReplicationUsesDcId =
oldReplicationUsesDcId || oldConfig.tLogPolicy->attributeKeys().count("dcid");

if (oldConfig.usableRegions != newConfig.usableRegions) {
// cannot change region configuration
std::map<Key, int32_t> dcId_priority;
for (auto& it : newConfig.regions) {
dcId_priority[it.dcId] = it.priority;
}
for (auto& it : oldConfig.regions) {
if (!dcId_priority.count(it.dcId) || dcId_priority[it.dcId] != it.priority) {
return ConfigurationResult::REGIONS_CHANGED;
}
}

// must only have one region with priority >= 0
int activeRegionCount = 0;
for (auto& it : newConfig.regions) {
if (it.priority >= 0) {
activeRegionCount++;
}
}
if (activeRegionCount > 1) {
return ConfigurationResult::MULTIPLE_ACTIVE_REGIONS;
}
}

state Future<RangeResult> fServerList = (newConfig.regions.size())
? tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY)
: Future<RangeResult>();

if (newConfig.usableRegions == 2) {
if (oldReplicationUsesDcId) {
state Future<RangeResult> fLocalityList =
tr.getRange(tagLocalityListKeys, CLIENT_KNOBS->TOO_MANY);
wait(success(fLocalityList) || tooLong);
if (!fLocalityList.isReady()) {
return ConfigurationResult::DATABASE_UNAVAILABLE;
}
RangeResult localityList = fLocalityList.get();
ASSERT(!localityList.more && localityList.size() < CLIENT_KNOBS->TOO_MANY);

std::set<Key> localityDcIds;
for (auto& s : localityList) {
auto dc = decodeTagLocalityListKey(s.key);
if (dc.present()) {
localityDcIds.insert(dc.get());
}
}

for (auto& it : newConfig.regions) {
if (localityDcIds.count(it.dcId) == 0) {
return ConfigurationResult::DCID_MISSING;
}
}
} else {
// all regions with priority >= 0 must be fully replicated
state std::vector<Future<Optional<Value>>> replicasFutures;
for (auto& it : newConfig.regions) {
if (it.priority >= 0) {
replicasFutures.push_back(tr.get(datacenterReplicasKeyFor(it.dcId)));
}
}
wait(waitForAll(replicasFutures) || tooLong);

for (auto& it : replicasFutures) {
if (!it.isReady()) {
return ConfigurationResult::DATABASE_UNAVAILABLE;
}
if (!it.get().present()) {
return ConfigurationResult::REGION_NOT_FULLY_REPLICATED;
}
}
}
}

if (newConfig.regions.size()) {
// all storage servers must be in one of the regions
wait(success(fServerList) || tooLong);
if (!fServerList.isReady()) {
return ConfigurationResult::DATABASE_UNAVAILABLE;
}
RangeResult serverList = fServerList.get();
ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY);

std::set<Key> newDcIds;
for (auto& it : newConfig.regions) {
newDcIds.insert(it.dcId);
}
std::set<Optional<Key>> missingDcIds;
for (auto& s : serverList) {
auto ssi = decodeServerListValue(s.value);
if (!ssi.locality.dcId().present() || !newDcIds.count(ssi.locality.dcId().get())) {
missingDcIds.insert(ssi.locality.dcId());
}
}
if (missingDcIds.size() > (oldReplicationUsesDcId ? 1 : 0)) {
return ConfigurationResult::STORAGE_IN_UNKNOWN_DCID;
}
}

wait(success(fWorkers) || tooLong);
if (!fWorkers.isReady()) {
return ConfigurationResult::DATABASE_UNAVAILABLE;
}

if (newConfig.regions.size()) {
std::map<Optional<Key>, std::set<Optional<Key>>> dcId_zoneIds;
for (auto& it : fWorkers.get()) {
if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
dcId_zoneIds[it.locality.dcId()].insert(it.locality.zoneId());
}
}
for (auto& region : newConfig.regions) {
if (dcId_zoneIds[region.dcId].size() <
std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
return ConfigurationResult::NOT_ENOUGH_WORKERS;
}
if (region.satelliteTLogReplicationFactor > 0 && region.priority >= 0) {
int totalSatelliteProcesses = 0;
for (auto& sat : region.satellites) {
totalSatelliteProcesses += dcId_zoneIds[sat.dcId].size();
}
if (totalSatelliteProcesses < region.satelliteTLogReplicationFactor) {
return ConfigurationResult::NOT_ENOUGH_WORKERS;
}
}
}
} else {
std::set<Optional<Key>> zoneIds;
for (auto& it : fWorkers.get()) {
if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
zoneIds.insert(it.locality.zoneId());
}
}
if (zoneIds.size() < std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
return ConfigurationResult::NOT_ENOUGH_WORKERS;
}
}

if (newConfig.storageServerStoreType != oldConfig.storageServerStoreType &&
newConfig.storageMigrationType == StorageMigrationType::DISABLED) {
warnChangeStorageNoMigrate = true;
} else if ((newConfig.storageMigrationType == StorageMigrationType::GRADUAL &&
newConfig.perpetualStorageWiggleSpeed == 0) ||
(newConfig.perpetualStorageWiggleSpeed > 0 &&
newConfig.storageMigrationType == StorageMigrationType::DISABLED)) {
warnPPWGradual = true;
}
}
}
if (creating) {
tr.setOption(FDBTransactionOptions::INITIALIZE_NEW_DATABASE);
tr.addReadConflictRange(singleKeyRange(initIdKey));
} else if (m.size()) {
// might be used in an emergency transaction, so make sure it is retry-self-conflicting and
// CAUSAL_WRITE_RISKY
tr.setOption(FDBTransactionOptions::CAUSAL_WRITE_RISKY);
tr.addReadConflictRange(singleKeyRange(m.begin()->first));
}

if (locked.present()) {
ASSERT(creating);
tr.atomicOp(databaseLockedKey,
BinaryWriter::toValue(locked.get(), Unversioned())
.withPrefix(LiteralStringRef("0123456789"))
.withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
MutationRef::SetVersionstampedValue);
}
|
||||
for (auto i = m.begin(); i != m.end(); ++i) {
|
||||
tr.set(StringRef(i->first), StringRef(i->second));
|
||||
}
|
||||
|
||||
tr.addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
|
||||
tr.set(moveKeysLockOwnerKey, versionKey);
|
||||
|
||||
wait(tr.commit());
|
||||
break;
|
||||
} catch (Error& e) {
|
||||
state Error e1(e);
|
||||
if ((e.code() == error_code_not_committed || e.code() == error_code_transaction_too_old) && creating) {
|
||||
// The database now exists. Determine whether we created it or it was already existing/created by
|
||||
// someone else. The latter is an error.
|
||||
tr.reset();
|
||||
loop {
|
||||
try {
|
||||
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
|
||||
|
||||
Optional<Value> v = wait(tr.get(initIdKey));
|
||||
if (v != m[initIdKey.toString()])
|
||||
return ConfigurationResult::DATABASE_ALREADY_CREATED;
|
||||
else
|
||||
return ConfigurationResult::DATABASE_CREATED;
|
||||
} catch (Error& e2) {
|
||||
wait(tr.onError(e2));
|
||||
}
|
||||
}
|
||||
}
|
||||
wait(tr.onError(e1));
|
||||
}
|
||||
}
|
||||
|
||||
if (warnPPWGradual) {
|
||||
return ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL;
|
||||
} else if (warnChangeStorageNoMigrate) {
|
||||
return ConfigurationResult::SUCCESS_WARN_CHANGE_STORAGE_NOMIGRATE;
|
||||
} else {
|
||||
return ConfigurationResult::SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
ConfigureAutoResult parseConfig(StatusObject const& status) {
|
||||
ConfigureAutoResult result;
|
||||
StatusObjectReader statusObj(status);
|
||||
|
@ -975,97 +696,7 @@ ConfigureAutoResult parseConfig(StatusObject const& status) {
|
|||
return result;
|
||||
}
|
||||
|
||||
ACTOR Future<ConfigurationResult> autoConfig(Database cx, ConfigureAutoResult conf) {
|
||||
state Transaction tr(cx);
|
||||
state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
|
||||
|
||||
if (!conf.address_class.size())
|
||||
return ConfigurationResult::INCOMPLETE_CONFIGURATION; // FIXME: correct return type
|
||||
|
||||
loop {
|
||||
try {
|
||||
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
|
||||
|
||||
vector<ProcessData> workers = wait(getWorkers(&tr));
|
||||
std::map<NetworkAddress, Optional<Standalone<StringRef>>> address_processId;
|
||||
for (auto& w : workers) {
|
||||
address_processId[w.address] = w.locality.processId();
|
||||
}
|
||||
|
||||
for (auto& it : conf.address_class) {
|
||||
if (it.second.classSource() == ProcessClass::CommandLineSource) {
|
||||
tr.clear(processClassKeyFor(address_processId[it.first].get()));
|
||||
} else {
|
||||
tr.set(processClassKeyFor(address_processId[it.first].get()), processClassValue(it.second));
|
||||
}
|
||||
}
|
||||
|
||||
if (conf.address_class.size())
|
||||
tr.set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
|
||||
|
||||
if (conf.auto_logs != conf.old_logs)
|
||||
tr.set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
|
||||
|
||||
if (conf.auto_commit_proxies != conf.old_commit_proxies)
|
||||
tr.set(configKeysPrefix.toString() + "auto_commit_proxies", format("%d", conf.auto_commit_proxies));
|
||||
|
||||
if (conf.auto_grv_proxies != conf.old_grv_proxies)
|
||||
tr.set(configKeysPrefix.toString() + "auto_grv_proxies", format("%d", conf.auto_grv_proxies));
|
||||
|
||||
if (conf.auto_resolvers != conf.old_resolvers)
|
||||
tr.set(configKeysPrefix.toString() + "auto_resolvers", format("%d", conf.auto_resolvers));
|
||||
|
||||
if (conf.auto_replication != conf.old_replication) {
|
||||
std::vector<StringRef> modes;
|
||||
modes.push_back(conf.auto_replication);
|
||||
std::map<std::string, std::string> m;
|
||||
auto r = buildConfiguration(modes, m);
|
||||
if (r != ConfigurationResult::SUCCESS)
|
||||
return r;
|
||||
|
||||
for (auto& kv : m)
|
||||
tr.set(kv.first, kv.second);
|
||||
}
|
||||
|
||||
tr.addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
|
||||
tr.set(moveKeysLockOwnerKey, versionKey);
|
||||
|
||||
wait(tr.commit());
|
||||
return ConfigurationResult::SUCCESS;
|
||||
} catch (Error& e) {
|
||||
wait(tr.onError(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Future<ConfigurationResult> changeConfig(Database const& cx,
|
||||
std::vector<StringRef> const& modes,
|
||||
Optional<ConfigureAutoResult> const& conf,
|
||||
bool force) {
|
||||
if (modes.size() && modes[0] == LiteralStringRef("auto") && conf.present()) {
|
||||
return autoConfig(cx, conf.get());
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> m;
|
||||
auto r = buildConfiguration(modes, m);
|
||||
if (r != ConfigurationResult::SUCCESS)
|
||||
return r;
|
||||
return changeConfig(cx, m, force);
|
||||
}
|
||||
|
||||
Future<ConfigurationResult> changeConfig(Database const& cx, std::string const& modes, bool force) {
|
||||
TraceEvent("ChangeConfig").detail("Mode", modes);
|
||||
std::map<std::string, std::string> m;
|
||||
auto r = buildConfiguration(modes, m);
|
||||
if (r != ConfigurationResult::SUCCESS)
|
||||
return r;
|
||||
return changeConfig(cx, m, force);
|
||||
}
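
For orientation, a minimal usage sketch of the string overload above (illustrative only, not part of this commit; `cx` is assumed to be an open Database handle, and the mode string uses the same tokens as fdbcli's `configure`):

// Hypothetical caller; like the code above, it requires the flow actor compiler.
ACTOR Future<Void> exampleReconfigure(Database cx) {
    ConfigurationResult r = wait(changeConfig(cx, "triple ssd", false));
    if (r != ConfigurationResult::SUCCESS) {
        TraceEvent(SevWarn, "ExampleReconfigureFailed").detail("Result", static_cast<int>(r));
    }
    return Void();
}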

ACTOR Future<vector<ProcessData>> getWorkers(Transaction* tr) {
ACTOR Future<std::vector<ProcessData>> getWorkers(Transaction* tr) {
    state Future<RangeResult> processClasses = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY);
    state Future<RangeResult> processData = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY);

@ -1096,14 +727,14 @@ ACTOR Future<vector<ProcessData>> getWorkers(Transaction* tr) {
    return results;
}

ACTOR Future<vector<ProcessData>> getWorkers(Database cx) {
ACTOR Future<std::vector<ProcessData>> getWorkers(Database cx) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
            tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); // necessary?
            tr.setOption(FDBTransactionOptions::LOCK_AWARE);
            vector<ProcessData> workers = wait(getWorkers(&tr));
            std::vector<ProcessData> workers = wait(getWorkers(&tr));
            return workers;
        } catch (Error& e) {
            wait(tr.onError(e));
@ -1181,7 +812,7 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
        }
    }

    vector<Future<Optional<LeaderInfo>>> leaderServers;
    std::vector<Future<Optional<LeaderInfo>>> leaderServers;
    ClientCoordinators coord(Reference<ClusterConnectionFile>(new ClusterConnectionFile(conn)));

    leaderServers.reserve(coord.clientLeaderServers.size());
@ -1267,7 +898,7 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
    TEST(old.clusterKeyName() != conn.clusterKeyName()); // Quorum change with new name
    TEST(old.clusterKeyName() == conn.clusterKeyName()); // Quorum change with unchanged name

    state vector<Future<Optional<LeaderInfo>>> leaderServers;
    state std::vector<Future<Optional<LeaderInfo>>> leaderServers;
    state ClientCoordinators coord(Reference<ClusterConnectionFile>(new ClusterConnectionFile(conn)));
    // check if allowed to modify the cluster descriptor
    if (!change->getDesiredClusterKeyName().empty()) {
@ -1299,24 +930,24 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
}

struct SpecifiedQuorumChange final : IQuorumChange {
    vector<NetworkAddress> desired;
    explicit SpecifiedQuorumChange(vector<NetworkAddress> const& desired) : desired(desired) {}
    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                          vector<NetworkAddress> oldCoordinators,
                                                          Reference<ClusterConnectionFile>,
                                                          CoordinatorsResult&) override {
    std::vector<NetworkAddress> desired;
    explicit SpecifiedQuorumChange(std::vector<NetworkAddress> const& desired) : desired(desired) {}
    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                               std::vector<NetworkAddress> oldCoordinators,
                                                               Reference<ClusterConnectionFile>,
                                                               CoordinatorsResult&) override {
        return desired;
    }
};
Reference<IQuorumChange> specifiedQuorumChange(vector<NetworkAddress> const& addresses) {
Reference<IQuorumChange> specifiedQuorumChange(std::vector<NetworkAddress> const& addresses) {
    return Reference<IQuorumChange>(new SpecifiedQuorumChange(addresses));
}

struct NoQuorumChange final : IQuorumChange {
    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                          vector<NetworkAddress> oldCoordinators,
                                                          Reference<ClusterConnectionFile>,
                                                          CoordinatorsResult&) override {
    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                               std::vector<NetworkAddress> oldCoordinators,
                                                               Reference<ClusterConnectionFile>,
                                                               CoordinatorsResult&) override {
        return oldCoordinators;
    }
};
@ -1329,10 +960,10 @@ struct NameQuorumChange final : IQuorumChange {
    Reference<IQuorumChange> otherChange;
    explicit NameQuorumChange(std::string const& newName, Reference<IQuorumChange> const& otherChange)
      : newName(newName), otherChange(otherChange) {}
    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                          vector<NetworkAddress> oldCoordinators,
                                                          Reference<ClusterConnectionFile> cf,
                                                          CoordinatorsResult& t) override {
    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                               std::vector<NetworkAddress> oldCoordinators,
                                                               Reference<ClusterConnectionFile> cf,
                                                               CoordinatorsResult& t) override {
        return otherChange->getDesiredCoordinators(tr, oldCoordinators, cf, t);
    }
    std::string getDesiredClusterKeyName() const override { return newName; }
@ -1345,10 +976,10 @@ struct AutoQuorumChange final : IQuorumChange {
    int desired;
    explicit AutoQuorumChange(int desired) : desired(desired) {}

    Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                          vector<NetworkAddress> oldCoordinators,
                                                          Reference<ClusterConnectionFile> ccf,
                                                          CoordinatorsResult& err) override {
    Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                               std::vector<NetworkAddress> oldCoordinators,
                                                               Reference<ClusterConnectionFile> ccf,
                                                               CoordinatorsResult& err) override {
        return getDesired(Reference<AutoQuorumChange>::addRef(this), tr, oldCoordinators, ccf, &err);
    }

@ -1366,7 +997,7 @@ struct AutoQuorumChange final : IQuorumChange {

    ACTOR static Future<bool> isAcceptable(AutoQuorumChange* self,
                                           Transaction* tr,
                                           vector<NetworkAddress> oldCoordinators,
                                           std::vector<NetworkAddress> oldCoordinators,
                                           Reference<ClusterConnectionFile> ccf,
                                           int desiredCount,
                                           std::set<AddressExclusion>* excluded) {
@ -1378,14 +1009,14 @@ struct AutoQuorumChange final : IQuorumChange {

        // Check availability
        ClientCoordinators coord(ccf);
        vector<Future<Optional<LeaderInfo>>> leaderServers;
        std::vector<Future<Optional<LeaderInfo>>> leaderServers;
        leaderServers.reserve(coord.clientLeaderServers.size());
        for (int i = 0; i < coord.clientLeaderServers.size(); i++) {
            leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader,
                                                       GetLeaderRequest(coord.clusterKey, UID()),
                                                       TaskPriority::CoordinationReply));
        }
        Optional<vector<Optional<LeaderInfo>>> results =
        Optional<std::vector<Optional<LeaderInfo>>> results =
            wait(timeout(getAll(leaderServers), CLIENT_KNOBS->IS_ACCEPTABLE_DELAY));
        if (!results.present()) {
            return false;
@ -1412,11 +1043,11 @@ struct AutoQuorumChange final : IQuorumChange {
        return true; // The status quo seems fine
    }

    ACTOR static Future<vector<NetworkAddress>> getDesired(Reference<AutoQuorumChange> self,
                                                           Transaction* tr,
                                                           vector<NetworkAddress> oldCoordinators,
                                                           Reference<ClusterConnectionFile> ccf,
                                                           CoordinatorsResult* err) {
    ACTOR static Future<std::vector<NetworkAddress>> getDesired(Reference<AutoQuorumChange> self,
                                                                Transaction* tr,
                                                                std::vector<NetworkAddress> oldCoordinators,
                                                                Reference<ClusterConnectionFile> ccf,
                                                                CoordinatorsResult* err) {
        state int desiredCount = self->desired;

        if (desiredCount == -1) {
@ -1427,8 +1058,8 @@ struct AutoQuorumChange final : IQuorumChange {
        std::vector<AddressExclusion> excl = wait(getExcludedServers(tr));
        state std::set<AddressExclusion> excluded(excl.begin(), excl.end());

        vector<ProcessData> _workers = wait(getWorkers(tr));
        state vector<ProcessData> workers = _workers;
        std::vector<ProcessData> _workers = wait(getWorkers(tr));
        state std::vector<ProcessData> workers = _workers;

        std::map<NetworkAddress, LocalityData> addr_locality;
        for (auto w : workers)
@ -1464,7 +1095,7 @@ struct AutoQuorumChange final : IQuorumChange {
                .detail("DesiredCoordinators", desiredCount)
                .detail("CurrentCoordinators", oldCoordinators.size());
            *err = CoordinatorsResult::NOT_ENOUGH_MACHINES;
            return vector<NetworkAddress>();
            return std::vector<NetworkAddress>();
        }
        chosen.resize((chosen.size() - 1) | 1);
    }
@ -1476,11 +1107,11 @@ struct AutoQuorumChange final : IQuorumChange {
    // (1) the number of workers at each locality type (e.g., dcid) <= desiredCount; and
    // (2) prefer workers at a locality where fewer workers have been chosen than at other localities: evenly
    // distribute workers.
    void addDesiredWorkers(vector<NetworkAddress>& chosen,
                           const vector<ProcessData>& workers,
    void addDesiredWorkers(std::vector<NetworkAddress>& chosen,
                           const std::vector<ProcessData>& workers,
                           int desiredCount,
                           const std::set<AddressExclusion>& excluded) {
        vector<ProcessData> remainingWorkers(workers);
        std::vector<ProcessData> remainingWorkers(workers);
        deterministicRandom()->randomShuffle(remainingWorkers);

        std::partition(remainingWorkers.begin(), remainingWorkers.end(), [](const ProcessData& data) {
@ -1503,10 +1134,10 @@ struct AutoQuorumChange final : IQuorumChange {
        std::map<StringRef, std::map<StringRef, int>> currentCounts;
        std::map<StringRef, int> hardLimits;

        vector<StringRef> fields({ LiteralStringRef("dcid"),
                                   LiteralStringRef("data_hall"),
                                   LiteralStringRef("zoneid"),
                                   LiteralStringRef("machineid") });
        std::vector<StringRef> fields({ LiteralStringRef("dcid"),
                                        LiteralStringRef("data_hall"),
                                        LiteralStringRef("zoneid"),
                                        LiteralStringRef("machineid") });

        for (auto field = fields.begin(); field != fields.end(); field++) {
            if (field->toString() == "zoneid") {
@ -1570,7 +1201,7 @@ Reference<IQuorumChange> autoQuorumChange(int desired) {
    return Reference<IQuorumChange>(new AutoQuorumChange(desired));
}

void excludeServers(Transaction& tr, vector<AddressExclusion>& servers, bool failed) {
void excludeServers(Transaction& tr, std::vector<AddressExclusion>& servers, bool failed) {
    tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
    tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
    tr.setOption(FDBTransactionOptions::LOCK_AWARE);
@ -1589,7 +1220,7 @@ void excludeServers(Transaction& tr, vector<AddressExclusion>& servers, bool fai
    TraceEvent("ExcludeServersCommit").detail("Servers", describe(servers)).detail("ExcludeFailed", failed);
}

ACTOR Future<Void> excludeServers(Database cx, vector<AddressExclusion> servers, bool failed) {
ACTOR Future<Void> excludeServers(Database cx, std::vector<AddressExclusion> servers, bool failed) {
    if (cx->apiVersionAtLeast(700)) {
        state ReadYourWritesTransaction ryw(cx);
        loop {
@ -1692,7 +1323,7 @@ ACTOR Future<Void> excludeLocalities(Database cx, std::unordered_set<std::string
    }
}

ACTOR Future<Void> includeServers(Database cx, vector<AddressExclusion> servers, bool failed) {
ACTOR Future<Void> includeServers(Database cx, std::vector<AddressExclusion> servers, bool failed) {
    state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
    if (cx->apiVersionAtLeast(700)) {
        state ReadYourWritesTransaction ryw(cx);
@ -1795,7 +1426,7 @@ ACTOR Future<Void> includeServers(Database cx, vector<AddressExclusion> servers,

// Remove the given localities from the exclusion list.
// include localities by clearing the keys.
ACTOR Future<Void> includeLocalities(Database cx, vector<std::string> localities, bool failed, bool includeAll) {
ACTOR Future<Void> includeLocalities(Database cx, std::vector<std::string> localities, bool failed, bool includeAll) {
    state std::string versionKey = deterministicRandom()->randomUniqueID().toString();
    if (cx->apiVersionAtLeast(700)) {
        state ReadYourWritesTransaction ryw(cx);
@ -1889,7 +1520,7 @@ ACTOR Future<Void> setClass(Database cx, AddressExclusion server, ProcessClass p
        tr.setOption(FDBTransactionOptions::LOCK_AWARE);
        tr.setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);

        vector<ProcessData> workers = wait(getWorkers(&tr));
        std::vector<ProcessData> workers = wait(getWorkers(&tr));

        bool foundChange = false;
        for (int i = 0; i < workers.size(); i++) {
@ -1914,13 +1545,13 @@ ACTOR Future<Void> setClass(Database cx, AddressExclusion server, ProcessClass p
    }
}

ACTOR Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr) {
ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Transaction* tr) {
    state RangeResult r = wait(tr->getRange(excludedServersKeys, CLIENT_KNOBS->TOO_MANY));
    ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY);
    state RangeResult r2 = wait(tr->getRange(failedServersKeys, CLIENT_KNOBS->TOO_MANY));
    ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);

    vector<AddressExclusion> exclusions;
    std::vector<AddressExclusion> exclusions;
    for (auto i = r.begin(); i != r.end(); ++i) {
        auto a = decodeExcludedServersKey(i->key);
        if (a.isValid())
@ -1935,14 +1566,14 @@ ACTOR Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr) {
    return exclusions;
}

ACTOR Future<vector<AddressExclusion>> getExcludedServers(Database cx) {
ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Database cx) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
            tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); // necessary?
            tr.setOption(FDBTransactionOptions::LOCK_AWARE);
            vector<AddressExclusion> exclusions = wait(getExcludedServers(&tr));
            std::vector<AddressExclusion> exclusions = wait(getExcludedServers(&tr));
            return exclusions;
        } catch (Error& e) {
            wait(tr.onError(e));
@ -1951,13 +1582,13 @@ ACTOR Future<vector<AddressExclusion>> getExcludedServers(Database cx) {
}

// Get the current list of excluded localities by reading the keys.
ACTOR Future<vector<std::string>> getExcludedLocalities(Transaction* tr) {
ACTOR Future<std::vector<std::string>> getExcludedLocalities(Transaction* tr) {
    state RangeResult r = wait(tr->getRange(excludedLocalityKeys, CLIENT_KNOBS->TOO_MANY));
    ASSERT(!r.more && r.size() < CLIENT_KNOBS->TOO_MANY);
    state RangeResult r2 = wait(tr->getRange(failedLocalityKeys, CLIENT_KNOBS->TOO_MANY));
    ASSERT(!r2.more && r2.size() < CLIENT_KNOBS->TOO_MANY);

    vector<std::string> excludedLocalities;
    std::vector<std::string> excludedLocalities;
    for (const auto& i : r) {
        auto a = decodeExcludedLocalityKey(i.key);
        excludedLocalities.push_back(a);
@ -1971,14 +1602,14 @@ ACTOR Future<vector<std::string>> getExcludedLocalities(Transaction* tr) {
}

// Get the list of excluded localities by reading the keys.
ACTOR Future<vector<std::string>> getExcludedLocalities(Database cx) {
ACTOR Future<std::vector<std::string>> getExcludedLocalities(Database cx) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
            tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            tr.setOption(FDBTransactionOptions::LOCK_AWARE);
            vector<std::string> exclusions = wait(getExcludedLocalities(&tr));
            std::vector<std::string> exclusions = wait(getExcludedLocalities(&tr));
            return exclusions;
        } catch (Error& e) {
            wait(tr.onError(e));
@ -2208,7 +1839,7 @@ ACTOR Future<bool> checkForExcludingServersTxActor(ReadYourWritesTransaction* tr
}

ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Database cx,
                                                                vector<AddressExclusion> excl,
                                                                std::vector<AddressExclusion> excl,
                                                                bool waitForAllExcluded) {
    state std::set<AddressExclusion> exclusions(excl.begin(), excl.end());
    state std::set<NetworkAddress> inProgressExclusion;

@ -121,31 +121,17 @@ ConfigurationResult buildConfiguration(

bool isCompleteConfiguration(std::map<std::string, std::string> const& options);

// All versions of changeConfig apply the given set of configuration tokens to the database, and return a
// ConfigurationResult (or error).
Future<ConfigurationResult> changeConfig(Database const& cx,
                                         std::string const& configMode,
                                         bool force); // Accepts tokens separated by spaces in a single string

ConfigureAutoResult parseConfig(StatusObject const& status);
Future<ConfigurationResult> changeConfig(Database const& cx,
                                         std::vector<StringRef> const& modes,
                                         Optional<ConfigureAutoResult> const& conf,
                                         bool force); // Accepts a vector of configuration tokens
ACTOR Future<ConfigurationResult> changeConfig(
    Database cx,
    std::map<std::string, std::string> m,
    bool force); // Accepts a full configuration in key/value format (from buildConfiguration)
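
A sketch of the two-step path these declarations describe (hypothetical values; `cx` is assumed to be an open Database). buildConfiguration expands configuration tokens into the explicit key/value map that the map-based overload consumes:

std::map<std::string, std::string> m;
auto r = buildConfiguration("triple ssd", m); // fills m with configKeysPrefix ("\xff/conf/...") entries
if (r == ConfigurationResult::SUCCESS) {
    Future<ConfigurationResult> f = changeConfig(cx, m, false);
}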

ACTOR Future<DatabaseConfiguration> getDatabaseConfiguration(Database cx);
ACTOR Future<Void> waitForFullReplication(Database cx);

struct IQuorumChange : ReferenceCounted<IQuorumChange> {
    virtual ~IQuorumChange() {}
    virtual Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                                  vector<NetworkAddress> oldCoordinators,
                                                                  Reference<ClusterConnectionFile>,
                                                                  CoordinatorsResult&) = 0;
    virtual Future<std::vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
                                                                       std::vector<NetworkAddress> oldCoordinators,
                                                                       Reference<ClusterConnectionFile>,
                                                                       CoordinatorsResult&) = 0;
    virtual std::string getDesiredClusterKeyName() const { return std::string(); }
};

@ -156,14 +142,14 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChange> change);
Reference<IQuorumChange> autoQuorumChange(int desired = -1);
Reference<IQuorumChange> noQuorumChange();
Reference<IQuorumChange> specifiedQuorumChange(vector<NetworkAddress> const&);
Reference<IQuorumChange> specifiedQuorumChange(std::vector<NetworkAddress> const&);
Reference<IQuorumChange> nameQuorumChange(std::string const& name, Reference<IQuorumChange> const& other);

// Exclude the given set of servers from use as state servers. Returns as soon as the change is durable, without
// necessarily waiting for the servers to be evacuated. A NetworkAddress with a port of 0 means all servers on the
// given IP.
ACTOR Future<Void> excludeServers(Database cx, vector<AddressExclusion> servers, bool failed = false);
void excludeServers(Transaction& tr, vector<AddressExclusion>& servers, bool failed = false);
ACTOR Future<Void> excludeServers(Database cx, std::vector<AddressExclusion> servers, bool failed = false);
void excludeServers(Transaction& tr, std::vector<AddressExclusion>& servers, bool failed = false);
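
A hypothetical exclusion sketch (the addresses are placeholders, and the wait() must run inside an ACTOR); a port of 0, as the comment above notes, excludes every process on the IP:

std::vector<AddressExclusion> servers;
servers.push_back(AddressExclusion(IPAddress::parse("10.0.0.1").get()));       // whole IP (port 0)
servers.push_back(AddressExclusion(IPAddress::parse("10.0.0.2").get(), 4500)); // one process
wait(excludeServers(cx, servers, false)); // failed=true would instead mark them permanently failed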

// Exclude the servers matching the given set of localities from use as state servers. Returns as soon as the change
// is durable, without necessarily waiting for the servers to be evacuated.
@ -172,11 +158,11 @@ void excludeLocalities(Transaction& tr, std::unordered_set<std::string> localiti

// Remove the given servers from the exclusion list. A NetworkAddress with a port of 0 means all servers on the given
// IP. A NetworkAddress() means all servers (don't exclude anything)
ACTOR Future<Void> includeServers(Database cx, vector<AddressExclusion> servers, bool failed = false);
ACTOR Future<Void> includeServers(Database cx, std::vector<AddressExclusion> servers, bool failed = false);

// Remove the given localities from the exclusion list.
ACTOR Future<Void> includeLocalities(Database cx,
                                     vector<std::string> localities,
                                     std::vector<std::string> localities,
                                     bool failed = false,
                                     bool includeAll = false);

@ -185,12 +171,12 @@ ACTOR Future<Void> includeLocalities(Database cx,
ACTOR Future<Void> setClass(Database cx, AddressExclusion server, ProcessClass processClass);

// Get the current list of excluded servers
ACTOR Future<vector<AddressExclusion>> getExcludedServers(Database cx);
ACTOR Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr);
ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Database cx);
ACTOR Future<std::vector<AddressExclusion>> getExcludedServers(Transaction* tr);

// Get the current list of excluded localities
ACTOR Future<vector<std::string>> getExcludedLocalities(Database cx);
ACTOR Future<vector<std::string>> getExcludedLocalities(Transaction* tr);
ACTOR Future<std::vector<std::string>> getExcludedLocalities(Database cx);
ACTOR Future<std::vector<std::string>> getExcludedLocalities(Transaction* tr);

std::set<AddressExclusion> getAddressesByLocality(const std::vector<ProcessData>& workers, const std::string& locality);

@ -198,15 +184,15 @@ std::set<AddressExclusion> getAddressesByLocality(const std::vector<ProcessData>
// true, this actor returns once it is safe to shut down all such machines without impacting fault tolerance, until and
// unless any of them are explicitly included with includeServers()
ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Database cx,
                                                                vector<AddressExclusion> servers,
                                                                std::vector<AddressExclusion> servers,
                                                                bool waitForAllExcluded);
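
Continuing the exclusion sketch above, a caller can block until evacuation is complete; this is an assumed usage pattern, not code from this commit:

// With waitForAllExcluded=true this returns once the excluded servers are safe to shut
// down; with false, the returned set reports exclusions that are still in progress.
std::set<NetworkAddress> inProgress = wait(checkForExcludingServers(cx, servers, true));
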
ACTOR Future<bool> checkForExcludingServersTxActor(ReadYourWritesTransaction* tr,
                                                   std::set<AddressExclusion>* exclusions,
                                                   std::set<NetworkAddress>* inProgressExclusion);

// Gets a list of all workers in the cluster (excluding testers)
ACTOR Future<vector<ProcessData>> getWorkers(Database cx);
ACTOR Future<vector<ProcessData>> getWorkers(Transaction* tr);
ACTOR Future<std::vector<ProcessData>> getWorkers(Database cx);
ACTOR Future<std::vector<ProcessData>> getWorkers(Transaction* tr);

ACTOR Future<Void> timeKeeperSetDisable(Database cx);

@ -324,6 +310,436 @@ Future<Void> removeCachedRange(Reference<DB> db, KeyRangeRef range) {
    return changeCachedRange(db, range, false);
}

ACTOR template <class Tr>
Future<std::vector<ProcessData>> getWorkers(Reference<Tr> tr,
                                            typename Tr::template FutureT<RangeResult> processClassesF,
                                            typename Tr::template FutureT<RangeResult> processDataF) {
    // processClassesF and processDataF are used to hold standalone memory
    processClassesF = tr->getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY);
    processDataF = tr->getRange(workerListKeys, CLIENT_KNOBS->TOO_MANY);
    state Future<RangeResult> processClasses = safeThreadFutureToFuture(processClassesF);
    state Future<RangeResult> processData = safeThreadFutureToFuture(processDataF);

    wait(success(processClasses) && success(processData));
    ASSERT(!processClasses.get().more && processClasses.get().size() < CLIENT_KNOBS->TOO_MANY);
    ASSERT(!processData.get().more && processData.get().size() < CLIENT_KNOBS->TOO_MANY);

    std::map<Optional<Standalone<StringRef>>, ProcessClass> id_class;
    for (int i = 0; i < processClasses.get().size(); i++) {
        id_class[decodeProcessClassKey(processClasses.get()[i].key)] =
            decodeProcessClassValue(processClasses.get()[i].value);
    }

    std::vector<ProcessData> results;

    for (int i = 0; i < processData.get().size(); i++) {
        ProcessData data = decodeWorkerListValue(processData.get()[i].value);
        ProcessClass processClass = id_class[data.locality.processId()];

        if (processClass.classSource() == ProcessClass::DBSource ||
            data.processClass.classType() == ProcessClass::UnsetClass)
            data.processClass = processClass;

        if (data.processClass.classType() != ProcessClass::TesterClass)
            results.push_back(data);
    }

    return results;
}
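
The FutureT out-parameters above exist so the caller keeps the thread futures (and the standalone memory behind the results) alive across the wait. The call shape, mirroring the changeConfig body below:

// Inside an ACTOR template <class DB> function with db of type Reference<DB>:
state Reference<typename DB::TransactionT> tr = db->createTransaction();
state typename DB::TransactionT::template FutureT<RangeResult> processClassesF;
state typename DB::TransactionT::template FutureT<RangeResult> processDataF;
state Future<std::vector<ProcessData>> fWorkers = getWorkers(tr, processClassesF, processDataF);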

// All versions of changeConfig apply the given set of configuration tokens to the database, and return a
// ConfigurationResult (or error).

// Accepts a full configuration in key/value format (from buildConfiguration)
ACTOR template <class DB>
Future<ConfigurationResult> changeConfig(Reference<DB> db, std::map<std::string, std::string> m, bool force) {
    state StringRef initIdKey = LiteralStringRef("\xff/init_id");
    state Reference<typename DB::TransactionT> tr = db->createTransaction();

    if (!m.size()) {
        return ConfigurationResult::NO_OPTIONS_PROVIDED;
    }

    // make sure we have essential configuration options
    std::string initKey = configKeysPrefix.toString() + "initialized";
    state bool creating = m.count(initKey) != 0;
    state Optional<UID> locked;
    {
        auto iter = m.find(databaseLockedKey.toString());
        if (iter != m.end()) {
            if (!creating) {
                return ConfigurationResult::LOCKED_NOT_NEW;
            }
            locked = UID::fromString(iter->second);
            m.erase(iter);
        }
    }
    if (creating) {
        m[initIdKey.toString()] = deterministicRandom()->randomUniqueID().toString();
        if (!isCompleteConfiguration(m)) {
            return ConfigurationResult::INCOMPLETE_CONFIGURATION;
        }
    }

    state Future<Void> tooLong = delay(60);
    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
    state bool oldReplicationUsesDcId = false;
    state bool warnPPWGradual = false;
    state bool warnChangeStorageNoMigrate = false;
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            tr->setOption(FDBTransactionOptions::LOCK_AWARE);
            tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);

            if (!creating && !force) {
                state typename DB::TransactionT::template FutureT<RangeResult> fConfigF =
                    tr->getRange(configKeys, CLIENT_KNOBS->TOO_MANY);
                state Future<RangeResult> fConfig = safeThreadFutureToFuture(fConfigF);
                state typename DB::TransactionT::template FutureT<RangeResult> processClassesF;
                state typename DB::TransactionT::template FutureT<RangeResult> processDataF;
                state Future<std::vector<ProcessData>> fWorkers = getWorkers(tr, processClassesF, processDataF);
                wait(success(fConfig) || tooLong);

                if (!fConfig.isReady()) {
                    return ConfigurationResult::DATABASE_UNAVAILABLE;
                }

                if (fConfig.isReady()) {
                    ASSERT(fConfig.get().size() < CLIENT_KNOBS->TOO_MANY);
                    state DatabaseConfiguration oldConfig;
                    oldConfig.fromKeyValues((VectorRef<KeyValueRef>)fConfig.get());
                    state DatabaseConfiguration newConfig = oldConfig;
                    for (auto kv : m) {
                        newConfig.set(kv.first, kv.second);
                    }
                    if (!newConfig.isValid()) {
                        return ConfigurationResult::INVALID_CONFIGURATION;
                    }

                    if (newConfig.tLogPolicy->attributeKeys().count("dcid") && newConfig.regions.size() > 0) {
                        return ConfigurationResult::REGION_REPLICATION_MISMATCH;
                    }

                    oldReplicationUsesDcId =
                        oldReplicationUsesDcId || oldConfig.tLogPolicy->attributeKeys().count("dcid");

                    if (oldConfig.usableRegions != newConfig.usableRegions) {
                        // cannot change region configuration
                        std::map<Key, int32_t> dcId_priority;
                        for (auto& it : newConfig.regions) {
                            dcId_priority[it.dcId] = it.priority;
                        }
                        for (auto& it : oldConfig.regions) {
                            if (!dcId_priority.count(it.dcId) || dcId_priority[it.dcId] != it.priority) {
                                return ConfigurationResult::REGIONS_CHANGED;
                            }
                        }

                        // must only have one region with priority >= 0
                        int activeRegionCount = 0;
                        for (auto& it : newConfig.regions) {
                            if (it.priority >= 0) {
                                activeRegionCount++;
                            }
                        }
                        if (activeRegionCount > 1) {
                            return ConfigurationResult::MULTIPLE_ACTIVE_REGIONS;
                        }
                    }

                    state typename DB::TransactionT::template FutureT<RangeResult> fServerListF =
                        tr->getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY);
                    state Future<RangeResult> fServerList =
                        (newConfig.regions.size()) ? safeThreadFutureToFuture(fServerListF) : Future<RangeResult>();

                    if (newConfig.usableRegions == 2) {
                        if (oldReplicationUsesDcId) {
                            state typename DB::TransactionT::template FutureT<RangeResult> fLocalityListF =
                                tr->getRange(tagLocalityListKeys, CLIENT_KNOBS->TOO_MANY);
                            state Future<RangeResult> fLocalityList = safeThreadFutureToFuture(fLocalityListF);
                            wait(success(fLocalityList) || tooLong);
                            if (!fLocalityList.isReady()) {
                                return ConfigurationResult::DATABASE_UNAVAILABLE;
                            }
                            RangeResult localityList = fLocalityList.get();
                            ASSERT(!localityList.more && localityList.size() < CLIENT_KNOBS->TOO_MANY);

                            std::set<Key> localityDcIds;
                            for (auto& s : localityList) {
                                auto dc = decodeTagLocalityListKey(s.key);
                                if (dc.present()) {
                                    localityDcIds.insert(dc.get());
                                }
                            }

                            for (auto& it : newConfig.regions) {
                                if (localityDcIds.count(it.dcId) == 0) {
                                    return ConfigurationResult::DCID_MISSING;
                                }
                            }
                        } else {
                            // all regions with priority >= 0 must be fully replicated
                            state std::vector<typename DB::TransactionT::template FutureT<Optional<Value>>>
                                replicasFuturesF;
                            state std::vector<Future<Optional<Value>>> replicasFutures;
                            for (auto& it : newConfig.regions) {
                                if (it.priority >= 0) {
                                    replicasFuturesF.push_back(tr->get(datacenterReplicasKeyFor(it.dcId)));
                                    replicasFutures.push_back(safeThreadFutureToFuture(replicasFuturesF.back()));
                                }
                            }
                            wait(waitForAll(replicasFutures) || tooLong);

                            for (auto& it : replicasFutures) {
                                if (!it.isReady()) {
                                    return ConfigurationResult::DATABASE_UNAVAILABLE;
                                }
                                if (!it.get().present()) {
                                    return ConfigurationResult::REGION_NOT_FULLY_REPLICATED;
                                }
                            }
                        }
                    }

                    if (newConfig.regions.size()) {
                        // all storage servers must be in one of the regions
                        wait(success(fServerList) || tooLong);
                        if (!fServerList.isReady()) {
                            return ConfigurationResult::DATABASE_UNAVAILABLE;
                        }
                        RangeResult serverList = fServerList.get();
                        ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY);

                        std::set<Key> newDcIds;
                        for (auto& it : newConfig.regions) {
                            newDcIds.insert(it.dcId);
                        }
                        std::set<Optional<Key>> missingDcIds;
                        for (auto& s : serverList) {
                            auto ssi = decodeServerListValue(s.value);
                            if (!ssi.locality.dcId().present() || !newDcIds.count(ssi.locality.dcId().get())) {
                                missingDcIds.insert(ssi.locality.dcId());
                            }
                        }
                        if (missingDcIds.size() > (oldReplicationUsesDcId ? 1 : 0)) {
                            return ConfigurationResult::STORAGE_IN_UNKNOWN_DCID;
                        }
                    }

                    wait(success(fWorkers) || tooLong);
                    if (!fWorkers.isReady()) {
                        return ConfigurationResult::DATABASE_UNAVAILABLE;
                    }

                    if (newConfig.regions.size()) {
                        std::map<Optional<Key>, std::set<Optional<Key>>> dcId_zoneIds;
                        for (auto& it : fWorkers.get()) {
                            if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
                                dcId_zoneIds[it.locality.dcId()].insert(it.locality.zoneId());
                            }
                        }
                        for (auto& region : newConfig.regions) {
                            if (dcId_zoneIds[region.dcId].size() <
                                std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
                                return ConfigurationResult::NOT_ENOUGH_WORKERS;
                            }
                            if (region.satelliteTLogReplicationFactor > 0 && region.priority >= 0) {
                                int totalSatelliteProcesses = 0;
                                for (auto& sat : region.satellites) {
                                    totalSatelliteProcesses += dcId_zoneIds[sat.dcId].size();
                                }
                                if (totalSatelliteProcesses < region.satelliteTLogReplicationFactor) {
                                    return ConfigurationResult::NOT_ENOUGH_WORKERS;
                                }
                            }
                        }
                    } else {
                        std::set<Optional<Key>> zoneIds;
                        for (auto& it : fWorkers.get()) {
                            if (it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit) {
                                zoneIds.insert(it.locality.zoneId());
                            }
                        }
                        if (zoneIds.size() < std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
                            return ConfigurationResult::NOT_ENOUGH_WORKERS;
                        }
                    }

                    if (newConfig.storageServerStoreType != oldConfig.storageServerStoreType &&
                        newConfig.storageMigrationType == StorageMigrationType::DISABLED) {
                        warnChangeStorageNoMigrate = true;
                    } else if ((newConfig.storageMigrationType == StorageMigrationType::GRADUAL &&
                                newConfig.perpetualStorageWiggleSpeed == 0) ||
                               (newConfig.perpetualStorageWiggleSpeed > 0 &&
                                newConfig.storageMigrationType == StorageMigrationType::DISABLED)) {
                        warnPPWGradual = true;
                    }
                }
            }
            if (creating) {
                tr->setOption(FDBTransactionOptions::INITIALIZE_NEW_DATABASE);
                tr->addReadConflictRange(singleKeyRange(initIdKey));
            } else if (m.size()) {
                // might be used in an emergency transaction, so make sure it is retry-self-conflicting and
                // CAUSAL_WRITE_RISKY
                tr->setOption(FDBTransactionOptions::CAUSAL_WRITE_RISKY);
                tr->addReadConflictRange(singleKeyRange(m.begin()->first));
            }

            if (locked.present()) {
                ASSERT(creating);
                tr->atomicOp(databaseLockedKey,
                             BinaryWriter::toValue(locked.get(), Unversioned())
                                 .withPrefix(LiteralStringRef("0123456789"))
                                 .withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
                             MutationRef::SetVersionstampedValue);
            }

            for (auto i = m.begin(); i != m.end(); ++i) {
                tr->set(StringRef(i->first), StringRef(i->second));
            }

            tr->addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
            tr->set(moveKeysLockOwnerKey, versionKey);

            wait(safeThreadFutureToFuture(tr->commit()));
            break;
        } catch (Error& e) {
            state Error e1(e);
            if ((e.code() == error_code_not_committed || e.code() == error_code_transaction_too_old) && creating) {
                // The database now exists. Determine whether we created it or it was already existing/created by
                // someone else. The latter is an error.
                tr->reset();
                loop {
                    try {
                        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                        tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
                        tr->setOption(FDBTransactionOptions::LOCK_AWARE);
                        tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);

                        state typename DB::TransactionT::template FutureT<Optional<Value>> vF = tr->get(initIdKey);
                        Optional<Value> v = wait(safeThreadFutureToFuture(vF));
                        if (v != m[initIdKey.toString()])
                            return ConfigurationResult::DATABASE_ALREADY_CREATED;
                        else
                            return ConfigurationResult::DATABASE_CREATED;
                    } catch (Error& e2) {
                        wait(safeThreadFutureToFuture(tr->onError(e2)));
                    }
                }
            }
            wait(safeThreadFutureToFuture(tr->onError(e1)));
        }
    }

    if (warnPPWGradual) {
        return ConfigurationResult::SUCCESS_WARN_PPW_GRADUAL;
    } else if (warnChangeStorageNoMigrate) {
        return ConfigurationResult::SUCCESS_WARN_CHANGE_STORAGE_NOMIGRATE;
    } else {
        return ConfigurationResult::SUCCESS;
    }
}
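
One design note on the actor above: it is written against a generic DB so the same code can drive both native transactions and thread-backed ones (e.g., the multi-version client), which is why every read and commit is bridged with safeThreadFutureToFuture while the FutureT holder stays alive in a state variable. A minimal sketch of that bridging pattern (someKey is a placeholder, not from this commit):

state typename DB::TransactionT::template FutureT<Optional<Value>> vF = tr->get(someKey);
Optional<Value> v = wait(safeThreadFutureToFuture(vF)); // vF must outlive the wait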
|
||||
|
||||
ACTOR template <class DB>
|
||||
Future<ConfigurationResult> autoConfig(Reference<DB> db, ConfigureAutoResult conf) {
|
||||
state Reference<typename DB::TransactionT> tr = db->createTransaction();
|
||||
state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
|
||||
|
||||
if (!conf.address_class.size())
|
||||
return ConfigurationResult::INCOMPLETE_CONFIGURATION; // FIXME: correct return type
|
||||
|
||||
loop {
|
||||
try {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
|
||||
|
||||
state typename DB::TransactionT::template FutureT<RangeResult> processClassesF;
|
||||
state typename DB::TransactionT::template FutureT<RangeResult> processDataF;
|
||||
std::vector<ProcessData> workers = wait(getWorkers(tr, processClassesF, processDataF));
|
||||
std::map<NetworkAddress, Optional<Standalone<StringRef>>> address_processId;
|
||||
for (auto& w : workers) {
|
||||
address_processId[w.address] = w.locality.processId();
|
||||
}
|
||||
|
||||
for (auto& it : conf.address_class) {
|
||||
if (it.second.classSource() == ProcessClass::CommandLineSource) {
|
||||
tr->clear(processClassKeyFor(address_processId[it.first].get()));
|
||||
} else {
|
||||
tr->set(processClassKeyFor(address_processId[it.first].get()), processClassValue(it.second));
|
||||
}
|
||||
}
|
||||
|
||||
if (conf.address_class.size())
|
||||
tr->set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
|
||||
|
||||
if (conf.auto_logs != conf.old_logs)
|
||||
tr->set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
|
||||
|
||||
if (conf.auto_commit_proxies != conf.old_commit_proxies)
|
||||
tr->set(configKeysPrefix.toString() + "auto_commit_proxies", format("%d", conf.auto_commit_proxies));
|
||||
|
||||
if (conf.auto_grv_proxies != conf.old_grv_proxies)
|
||||
tr->set(configKeysPrefix.toString() + "auto_grv_proxies", format("%d", conf.auto_grv_proxies));
|
||||
|
||||
if (conf.auto_resolvers != conf.old_resolvers)
|
||||
tr->set(configKeysPrefix.toString() + "auto_resolvers", format("%d", conf.auto_resolvers));
|
||||
|
||||
if (conf.auto_replication != conf.old_replication) {
|
||||
std::vector<StringRef> modes;
|
||||
modes.push_back(conf.auto_replication);
|
||||
std::map<std::string, std::string> m;
|
||||
auto r = buildConfiguration(modes, m);
|
||||
if (r != ConfigurationResult::SUCCESS)
|
||||
return r;
|
||||
|
||||
for (auto& kv : m)
|
||||
tr->set(kv.first, kv.second);
|
||||
}
|
||||
|
||||
tr->addReadConflictRange(singleKeyRange(moveKeysLockOwnerKey));
|
||||
tr->set(moveKeysLockOwnerKey, versionKey);
|
||||
|
||||
wait(safeThreadFutureToFuture(tr->commit()));
|
||||
return ConfigurationResult::SUCCESS;
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Accepts tokens separated by spaces in a single string
|
||||
template <class DB>
|
||||
Future<ConfigurationResult> changeConfig(Reference<DB> db, std::string const& modes, bool force) {
|
||||
TraceEvent("ChangeConfig").detail("Mode", modes);
|
||||
std::map<std::string, std::string> m;
|
||||
auto r = buildConfiguration(modes, m);
|
||||
if (r != ConfigurationResult::SUCCESS)
|
||||
return r;
|
||||
return changeConfig(db, m, force);
|
||||
}
|
||||
|
||||
// Accepts a vector of configuration tokens
|
||||
template <class DB>
|
||||
Future<ConfigurationResult> changeConfig(Reference<DB> db,
|
||||
std::vector<StringRef> const& modes,
|
||||
Optional<ConfigureAutoResult> const& conf,
|
||||
bool force) {
|
||||
if (modes.size() && modes[0] == LiteralStringRef("auto") && conf.present()) {
|
||||
return autoConfig(db, conf.get());
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> m;
|
||||
auto r = buildConfiguration(modes, m);
|
||||
if (r != ConfigurationResult::SUCCESS)
|
||||
return r;
|
||||
return changeConfig(db, m, force);
|
||||
}
|
||||
|
||||
// return the corresponding error message for the CoordinatorsResult
|
||||
// used by special keys and fdbcli
|
||||
std::string generateErrorMessage(const CoordinatorsResult& res);
|
||||
|
|
|
@ -328,7 +328,7 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/fuzz") {
|
|||
return Void();
|
||||
}
|
||||
|
||||
ClusterConnectionString::ClusterConnectionString(vector<NetworkAddress> servers, Key key) : coord(servers) {
|
||||
ClusterConnectionString::ClusterConnectionString(std::vector<NetworkAddress> servers, Key key) : coord(servers) {
|
||||
parseKey(key.toString());
|
||||
}
|
||||
|
||||
|
@ -383,9 +383,9 @@ ClientCoordinators::ClientCoordinators(Key clusterKey, std::vector<NetworkAddres
|
|||
}
|
||||
|
||||
ClientLeaderRegInterface::ClientLeaderRegInterface(NetworkAddress remote)
|
||||
: getLeader(Endpoint({ remote }, WLTOKEN_CLIENTLEADERREG_GETLEADER)),
|
||||
openDatabase(Endpoint({ remote }, WLTOKEN_CLIENTLEADERREG_OPENDATABASE)),
|
||||
checkDescriptorMutable(Endpoint({ remote }, WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE)) {}
|
||||
: getLeader(Endpoint::wellKnown({ remote }, WLTOKEN_CLIENTLEADERREG_GETLEADER)),
|
||||
openDatabase(Endpoint::wellKnown({ remote }, WLTOKEN_CLIENTLEADERREG_OPENDATABASE)),
|
||||
checkDescriptorMutable(Endpoint::wellKnown({ remote }, WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE)) {}
|
||||
|
||||
ClientLeaderRegInterface::ClientLeaderRegInterface(INetwork* local) {
|
||||
getLeader.makeWellKnownEndpoint(WLTOKEN_CLIENTLEADERREG_GETLEADER, TaskPriority::Coordination);
|
||||
|
@ -394,9 +394,8 @@ ClientLeaderRegInterface::ClientLeaderRegInterface(INetwork* local) {
|
|||
TaskPriority::Coordination);
|
||||
}
|
||||
|
||||
// Nominee is the worker among all workers that are considered as leader by a coordinator
|
||||
// This function contacts a coordinator coord to ask if the worker is considered as a leader (i.e., if the worker
|
||||
// is a nominee)
|
||||
// Nominee is the worker among all workers that are considered as leader by one coordinator
|
||||
// This function contacts a coordinator coord to ask who is its nominee.
|
||||
ACTOR Future<Void> monitorNominee(Key key,
|
||||
ClientLeaderRegInterface coord,
|
||||
AsyncTrigger* nomineeChange,
|
||||
|
@ -428,13 +427,13 @@ ACTOR Future<Void> monitorNominee(Key key,
|
|||
// Also used in fdbserver/LeaderElection.actor.cpp!
|
||||
// bool represents if the LeaderInfo is a majority answer or not.
|
||||
// This function also masks the first 7 bits of changeId of the nominees and returns the Leader with masked changeId
|
||||
Optional<std::pair<LeaderInfo, bool>> getLeader(const vector<Optional<LeaderInfo>>& nominees) {
|
||||
Optional<std::pair<LeaderInfo, bool>> getLeader(const std::vector<Optional<LeaderInfo>>& nominees) {
|
||||
// If any coordinator says that the quorum is forwarded, then it is
|
||||
for (int i = 0; i < nominees.size(); i++)
|
||||
if (nominees[i].present() && nominees[i].get().forward)
|
||||
return std::pair<LeaderInfo, bool>(nominees[i].get(), true);
|
||||
|
||||
vector<std::pair<UID, int>> maskedNominees;
|
||||
std::vector<std::pair<UID, int>> maskedNominees;
|
||||
maskedNominees.reserve(nominees.size());
|
||||
for (int i = 0; i < nominees.size(); i++) {
|
||||
if (nominees[i].present()) {
|
||||
|
@ -529,18 +528,6 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration(Reference<ClusterConn
|
|||
}
|
||||
}
|
||||
|
||||
Future<Void> monitorLeaderRemotelyInternal(Reference<ClusterConnectionFile> const& connFile,
|
||||
Reference<AsyncVar<Value>> const& outSerializedLeaderInfo);
|
||||
|
||||
template <class LeaderInterface>
|
||||
Future<Void> monitorLeaderRemotely(Reference<ClusterConnectionFile> const& connFile,
|
||||
Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
|
||||
LeaderDeserializer<LeaderInterface> deserializer;
|
||||
auto serializedInfo = makeReference<AsyncVar<Value>>();
|
||||
Future<Void> m = monitorLeaderRemotelyInternal(connFile, serializedInfo);
|
||||
return m || deserializer(serializedInfo, outKnownLeader);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> monitorLeaderInternal(Reference<ClusterConnectionFile> connFile,
|
||||
Reference<AsyncVar<Value>> outSerializedLeaderInfo) {
|
||||
state MonitorLeaderInfo info(connFile);
|
||||
|
@@ -656,7 +643,7 @@ ACTOR Future<Void> getClientInfoFromLeader(Reference<AsyncVar<Optional<ClusterCo
        choose {
            when(ClientDBInfo ni =
                     wait(brokenPromiseToNever(knownLeader->get().get().clientInterface.openDatabase.getReply(req)))) {
                TraceEvent("MonitorLeaderForProxiesGotClientInfo", knownLeader->get().get().clientInterface.id())
                TraceEvent("GetClientInfoFromLeaderGotClientInfo", knownLeader->get().get().clientInterface.id())
                    .detail("CommitProxy0", ni.commitProxies.size() ? ni.commitProxies[0].id() : UID())
                    .detail("GrvProxy0", ni.grvProxies.size() ? ni.grvProxies[0].id() : UID())
                    .detail("ClientID", ni.id);

@@ -667,11 +654,11 @@ ACTOR Future<Void> getClientInfoFromLeader(Reference<AsyncVar<Optional<ClusterCo
    }
}

ACTOR Future<Void> monitorLeaderForProxies(Key clusterKey,
                                           vector<NetworkAddress> coordinators,
                                           ClientData* clientData,
                                           Reference<AsyncVar<Optional<LeaderInfo>>> leaderInfo) {
    state vector<ClientLeaderRegInterface> clientLeaderServers;
ACTOR Future<Void> monitorLeaderAndGetClientInfo(Key clusterKey,
                                                 std::vector<NetworkAddress> coordinators,
                                                 ClientData* clientData,
                                                 Reference<AsyncVar<Optional<LeaderInfo>>> leaderInfo) {
    state std::vector<ClientLeaderRegInterface> clientLeaderServers;
    state AsyncTrigger nomineeChange;
    state std::vector<Optional<LeaderInfo>> nominees;
    state Future<Void> allActors;

@@ -695,7 +682,7 @@ ACTOR Future<Void> monitorLeaderForProxies(Key clusterKey,
    loop {
        Optional<std::pair<LeaderInfo, bool>> leader = getLeader(nominees);
        TraceEvent("MonitorLeaderForProxiesChange")
        TraceEvent("MonitorLeaderAndGetClientInfoLeaderChange")
            .detail("NewLeader", leader.present() ? leader.get().first.changeID : UID(1, 1))
            .detail("Key", clusterKey.printable());
        if (leader.present()) {

@@ -705,7 +692,7 @@ ACTOR Future<Void> monitorLeaderForProxies(Key clusterKey,
                outInfo.forward = leader.get().first.serializedInfo;
                clientData->clientInfo->set(CachedSerialization<ClientDBInfo>(outInfo));
                leaderInfo->set(leader.get().first);
                TraceEvent("MonitorLeaderForProxiesForwarding")
                TraceEvent("MonitorLeaderAndGetClientInfoForwarding")
                    .detail("NewConnStr", leader.get().first.serializedInfo.toString());
                return Void();
            }

@@ -762,7 +749,6 @@ void shrinkProxyList(ClientDBInfo& ni,
    }
}

// Leader is the process that will be elected by coordinators as the cluster controller
ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
    Reference<ClusterConnectionFile> connFile,
    Reference<AsyncVar<ClientDBInfo>> clientInfo,

@@ -771,9 +757,9 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
    Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions,
    Key traceLogGroup) {
    state ClusterConnectionString cs = info.intermediateConnFile->getConnectionString();
    state vector<NetworkAddress> addrs = cs.coordinators();
    state std::vector<NetworkAddress> addrs = cs.coordinators();
    state int idx = 0;
    state int successIdx = 0;
    state int successIndex = 0;
    state Optional<double> incorrectTime;
    state std::vector<UID> lastCommitProxyUIDs;
    state std::vector<CommitProxyInterface> lastCommitProxies;

@@ -840,11 +826,11 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
            auto& ni = rep.get().mutate();
            shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
            clientInfo->set(ni);
            successIdx = idx;
            successIndex = idx;
        } else {
            TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator can't talk to cluster controller
            idx = (idx + 1) % addrs.size();
            if (idx == successIdx) {
            if (idx == successIndex) {
                wait(delay(CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY));
            }
        }
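
The successIdx -> successIndex rename above is cosmetic, but the loop it lives in is the client's entire coordinator failover policy: walk the coordinator list round-robin, and only sleep once a full pass has failed, i.e. when idx wraps back around to the last index that worked. A minimal standalone sketch of that policy (the names and the bool-returning tryEndpoint callback are illustrative, not FDB API):

    #include <chrono>
    #include <cstddef>
    #include <functional>
    #include <thread>

    // Try each endpoint in turn; after a full unsuccessful pass, back off briefly.
    // Returns the index of the endpoint that eventually succeeded.
    size_t roundRobinRetry(size_t endpointCount,
                           const std::function<bool(size_t)>& tryEndpoint,
                           std::chrono::milliseconds reconnectDelay) {
        size_t idx = 0;
        size_t successIndex = 0; // last index that worked; wrapping back to it means a full failed pass
        for (;;) {
            if (tryEndpoint(idx))
                return idx;
            idx = (idx + 1) % endpointCount;
            if (idx == successIndex) {
                // Plays the role of CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY in the hunk above.
                std::this_thread::sleep_for(reconnectDelay);
            }
        }
    }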
@@ -61,17 +61,23 @@ struct MonitorLeaderInfo {
      : hasConnected(false), intermediateConnFile(intermediateConnFile) {}
};

// Monitors the given coordination group's leader election process and provides a best current guess
// of the current leader. If a leader is elected for long enough and communication with a quorum of
// coordinators is possible, eventually outKnownLeader will be that leader's interface.
Optional<std::pair<LeaderInfo, bool>> getLeader(const std::vector<Optional<LeaderInfo>>& nominees);

// This is one place where the leader election algorithm is run. The coordinator contacts all coordinators to collect
// nominees; the nominee with the most nominations is the leader. This function also monitors changes of the leader.
// If a leader is elected for long enough and communication with a quorum of coordinators is possible, eventually
// outKnownLeader will be that leader's interface.
template <class LeaderInterface>
Future<Void> monitorLeader(Reference<ClusterConnectionFile> const& connFile,
                           Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader);

Future<Void> monitorLeaderForProxies(Value const& key,
                                     vector<NetworkAddress> const& coordinators,
                                     ClientData* const& clientData,
                                     Reference<AsyncVar<Optional<LeaderInfo>>> const& leaderInfo);
// This is one place where the leader election algorithm is run. The coordinator contacts all coordinators to collect
// nominees; the nominee with the most nominations is the leader. This function also collects client data from the
// leader and monitors changes of the leader.
Future<Void> monitorLeaderAndGetClientInfo(Value const& key,
                                           std::vector<NetworkAddress> const& coordinators,
                                           ClientData* const& clientData,
                                           Reference<AsyncVar<Optional<LeaderInfo>>> const& leaderInfo);

Future<Void> monitorProxies(
    Reference<AsyncVar<Reference<ClusterConnectionFile>>> const& connFile,
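
The new comment states the election rule: each coordinator nominates a candidate, and the candidate with the most nominations becomes the leader. getLeader's real return type is Optional<std::pair<LeaderInfo, bool>>; the toy tally below strips that down to just the counting step, with a simplified stand-in for LeaderInfo:

    #include <cstdint>
    #include <map>
    #include <optional>
    #include <vector>

    struct ToyLeaderInfo {
        uint64_t changeID; // stands in for the real LeaderInfo identity
    };

    // Returns the nominee named by the most coordinators, if anyone was nominated at all.
    std::optional<ToyLeaderInfo> pickLeader(const std::vector<std::optional<ToyLeaderInfo>>& nominees) {
        std::map<uint64_t, int> votes;
        for (const auto& n : nominees)
            if (n)
                ++votes[n->changeID];
        std::optional<ToyLeaderInfo> best;
        int bestVotes = 0;
        for (const auto& [id, count] : votes) {
            if (count > bestVotes) {
                bestVotes = count;
                best = ToyLeaderInfo{ id };
            }
        }
        return best; // the real function additionally reports whether the winner holds a quorum (the bool)
    }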
@@ -606,7 +606,7 @@ void DLApi::addNetworkThreadCompletionHook(void (*hook)(void*), void* hookParame
// MultiVersionTransaction
MultiVersionTransaction::MultiVersionTransaction(Reference<MultiVersionDatabase> db,
                                                 UniqueOrderedOptionList<FDBTransactionOptions> defaultOptions)
  : db(db) {
  : db(db), startTime(timer_monotonic()), timeoutTsav(new ThreadSingleAssignmentVar<Void>()) {
    setDefaultOptions(defaultOptions);
    updateTransaction();
}

@@ -622,20 +622,23 @@ void MultiVersionTransaction::updateTransaction() {
    TransactionInfo newTr;
    if (currentDb.value) {
        newTr.transaction = currentDb.value->createTransaction();
    }

        Optional<StringRef> timeout;
        for (auto option : persistentOptions) {
            if (option.first == FDBTransactionOptions::TIMEOUT) {
                timeout = option.second.castTo<StringRef>();
            } else {
                newTr.transaction->setOption(option.first, option.second.castTo<StringRef>());
            }
    Optional<StringRef> timeout;
    for (auto option : persistentOptions) {
        if (option.first == FDBTransactionOptions::TIMEOUT) {
            timeout = option.second.castTo<StringRef>();
        } else if (currentDb.value) {
            newTr.transaction->setOption(option.first, option.second.castTo<StringRef>());
        }
    }

        // Setting a timeout can immediately cause a transaction to fail. The only timeout
        // that matters is the one most recently set, so we ignore any earlier set timeouts
        // that might inadvertently fail the transaction.
        if (timeout.present()) {
    // Setting a timeout can immediately cause a transaction to fail. The only timeout
    // that matters is the one most recently set, so we ignore any earlier set timeouts
    // that might inadvertently fail the transaction.
    if (timeout.present()) {
        setTimeout(timeout);
        if (currentDb.value) {
            newTr.transaction->setOption(FDBTransactionOptions::TIMEOUT, timeout);
        }
    }
@@ -670,19 +673,19 @@ void MultiVersionTransaction::setVersion(Version v) {
}
ThreadFuture<Version> MultiVersionTransaction::getReadVersion() {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getReadVersion() : ThreadFuture<Version>(Never());
    auto f = tr.transaction ? tr.transaction->getReadVersion() : makeTimeout<Version>();
    return abortableFuture(f, tr.onChange);
}

ThreadFuture<Optional<Value>> MultiVersionTransaction::get(const KeyRef& key, bool snapshot) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->get(key, snapshot) : ThreadFuture<Optional<Value>>(Never());
    auto f = tr.transaction ? tr.transaction->get(key, snapshot) : makeTimeout<Optional<Value>>();
    return abortableFuture(f, tr.onChange);
}

ThreadFuture<Key> MultiVersionTransaction::getKey(const KeySelectorRef& key, bool snapshot) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getKey(key, snapshot) : ThreadFuture<Key>(Never());
    auto f = tr.transaction ? tr.transaction->getKey(key, snapshot) : makeTimeout<Key>();
    return abortableFuture(f, tr.onChange);
}

@@ -692,8 +695,8 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeySelectorRef
                                                            bool snapshot,
                                                            bool reverse) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getRange(begin, end, limit, snapshot, reverse)
                            : ThreadFuture<RangeResult>(Never());
    auto f =
        tr.transaction ? tr.transaction->getRange(begin, end, limit, snapshot, reverse) : makeTimeout<RangeResult>();
    return abortableFuture(f, tr.onChange);
}

@@ -703,8 +706,8 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeySelectorRef
                                                            bool snapshot,
                                                            bool reverse) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getRange(begin, end, limits, snapshot, reverse)
                            : ThreadFuture<RangeResult>(Never());
    auto f =
        tr.transaction ? tr.transaction->getRange(begin, end, limits, snapshot, reverse) : makeTimeout<RangeResult>();
    return abortableFuture(f, tr.onChange);
}

@@ -713,8 +716,7 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeyRangeRef& k
                                                            bool snapshot,
                                                            bool reverse) {
    auto tr = getTransaction();
    auto f =
        tr.transaction ? tr.transaction->getRange(keys, limit, snapshot, reverse) : ThreadFuture<RangeResult>(Never());
    auto f = tr.transaction ? tr.transaction->getRange(keys, limit, snapshot, reverse) : makeTimeout<RangeResult>();
    return abortableFuture(f, tr.onChange);
}

@@ -723,21 +725,20 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeyRangeRef& k
                                                            bool snapshot,
                                                            bool reverse) {
    auto tr = getTransaction();
    auto f =
        tr.transaction ? tr.transaction->getRange(keys, limits, snapshot, reverse) : ThreadFuture<RangeResult>(Never());
    auto f = tr.transaction ? tr.transaction->getRange(keys, limits, snapshot, reverse) : makeTimeout<RangeResult>();
    return abortableFuture(f, tr.onChange);
}

ThreadFuture<Standalone<StringRef>> MultiVersionTransaction::getVersionstamp() {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getVersionstamp() : ThreadFuture<Standalone<StringRef>>(Never());
    auto f = tr.transaction ? tr.transaction->getVersionstamp() : makeTimeout<Standalone<StringRef>>();
    return abortableFuture(f, tr.onChange);
}

ThreadFuture<Standalone<VectorRef<const char*>>> MultiVersionTransaction::getAddressesForKey(const KeyRef& key) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getAddressesForKey(key)
                            : ThreadFuture<Standalone<VectorRef<const char*>>>(Never());
    auto f =
        tr.transaction ? tr.transaction->getAddressesForKey(key) : makeTimeout<Standalone<VectorRef<const char*>>>();
    return abortableFuture(f, tr.onChange);
}
@@ -750,7 +751,7 @@ void MultiVersionTransaction::addReadConflictRange(const KeyRangeRef& keys) {

ThreadFuture<int64_t> MultiVersionTransaction::getEstimatedRangeSizeBytes(const KeyRangeRef& keys) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getEstimatedRangeSizeBytes(keys) : ThreadFuture<int64_t>(Never());
    auto f = tr.transaction ? tr.transaction->getEstimatedRangeSizeBytes(keys) : makeTimeout<int64_t>();
    return abortableFuture(f, tr.onChange);
}

@@ -758,7 +759,7 @@ ThreadFuture<Standalone<VectorRef<KeyRef>>> MultiVersionTransaction::getRangeSpl
                                                                                 int64_t chunkSize) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getRangeSplitPoints(range, chunkSize)
                            : ThreadFuture<Standalone<VectorRef<KeyRef>>>(Never());
                            : makeTimeout<Standalone<VectorRef<KeyRef>>>();
    return abortableFuture(f, tr.onChange);
}

@@ -799,7 +800,7 @@ void MultiVersionTransaction::clear(const KeyRef& key) {

ThreadFuture<Void> MultiVersionTransaction::watch(const KeyRef& key) {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->watch(key) : ThreadFuture<Void>(Never());
    auto f = tr.transaction ? tr.transaction->watch(key) : makeTimeout<Void>();
    return abortableFuture(f, tr.onChange);
}

@@ -812,7 +813,7 @@ void MultiVersionTransaction::addWriteConflictRange(const KeyRangeRef& keys) {

ThreadFuture<Void> MultiVersionTransaction::commit() {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->commit() : ThreadFuture<Void>(Never());
    auto f = tr.transaction ? tr.transaction->commit() : makeTimeout<Void>();
    return abortableFuture(f, tr.onChange);
}

@@ -827,7 +828,7 @@ Version MultiVersionTransaction::getCommittedVersion() {

ThreadFuture<int64_t> MultiVersionTransaction::getApproximateSize() {
    auto tr = getTransaction();
    auto f = tr.transaction ? tr.transaction->getApproximateSize() : ThreadFuture<int64_t>(Never());
    auto f = tr.transaction ? tr.transaction->getApproximateSize() : makeTimeout<int64_t>();
    return abortableFuture(f, tr.onChange);
}
@@ -841,6 +842,11 @@ void MultiVersionTransaction::setOption(FDBTransactionOptions::Option option, Op
    if (MultiVersionApi::apiVersionAtLeast(610) && itr->second.persistent) {
        persistentOptions.emplace_back(option, value.castTo<Standalone<StringRef>>());
    }

    if (itr->first == FDBTransactionOptions::TIMEOUT) {
        setTimeout(value);
    }

    auto tr = getTransaction();
    if (tr.transaction) {
        tr.transaction->setOption(option, value);

@@ -853,7 +859,7 @@ ThreadFuture<Void> MultiVersionTransaction::onError(Error const& e) {
        return ThreadFuture<Void>(Void());
    } else {
        auto tr = getTransaction();
        auto f = tr.transaction ? tr.transaction->onError(e) : ThreadFuture<Void>(Never());
        auto f = tr.transaction ? tr.transaction->onError(e) : makeTimeout<Void>();
        f = abortableFuture(f, tr.onChange);

        return flatMapThreadFuture<Void, Void>(f, [this, e](ErrorOr<Void> ready) {
@@ -871,12 +877,92 @@ ThreadFuture<Void> MultiVersionTransaction::onError(Error const& e) {
    }
}

// Waits for the specified duration and signals the assignment variable with a timed out error.
// This will be canceled if a new timeout is set, in which case the tsav will not be signaled.
ACTOR Future<Void> timeoutImpl(Reference<ThreadSingleAssignmentVar<Void>> tsav, double duration) {
    wait(delay(duration));

    tsav->trySendError(transaction_timed_out());
    return Void();
}

// Configure a timeout based on the options set for this transaction. This timeout only applies
// if we don't have an underlying database object to connect with.
void MultiVersionTransaction::setTimeout(Optional<StringRef> value) {
    double timeoutDuration = extractIntOption(value, 0, std::numeric_limits<int>::max()) / 1000.0;

    ThreadFuture<Void> prevTimeout;
    ThreadFuture<Void> newTimeout = onMainThread([this, timeoutDuration]() {
        return timeoutImpl(timeoutTsav, timeoutDuration - std::max(0.0, now() - startTime));
    });

    { // lock scope
        ThreadSpinLockHolder holder(timeoutLock);

        prevTimeout = currentTimeout;
        currentTimeout = newTimeout;
    }

    // Cancel the previous timeout now that we have a new one. This means that changing the timeout
    // affects in-flight operations, which is consistent with the behavior in RYW.
    if (prevTimeout.isValid()) {
        prevTimeout.cancel();
    }
}

// Creates a ThreadFuture<T> that will signal an error if the transaction times out.
template <class T>
ThreadFuture<T> MultiVersionTransaction::makeTimeout() {
    ThreadFuture<Void> f;

    // We create a ThreadFuture that holds a reference to this below,
    // but the ThreadFuture does not increment the ref count
    timeoutTsav->addref();

    { // lock scope
        ThreadSpinLockHolder holder(timeoutLock);
        f = ThreadFuture<Void>(timeoutTsav.getPtr());
    }

    // When our timeoutTsav gets set, map it to the appropriate type
    return mapThreadFuture<Void, T>(f, [](ErrorOr<Void> v) {
        ASSERT(v.isError());
        return ErrorOr<T>(v.getError());
    });
}

void MultiVersionTransaction::reset() {
    persistentOptions.clear();

    // Reset the timeout state
    Reference<ThreadSingleAssignmentVar<Void>> prevTimeoutTsav;
    ThreadFuture<Void> prevTimeout;
    startTime = timer_monotonic();

    { // lock scope
        ThreadSpinLockHolder holder(timeoutLock);

        prevTimeoutTsav = timeoutTsav;
        timeoutTsav = makeReference<ThreadSingleAssignmentVar<Void>>();

        prevTimeout = currentTimeout;
        currentTimeout = ThreadFuture<Void>();
    }

    // Cancel any outstanding operations if they don't have an underlying transaction object to cancel them
    prevTimeoutTsav->trySendError(transaction_cancelled());
    if (prevTimeout.isValid()) {
        prevTimeout.cancel();
    }

    setDefaultOptions(db->dbState->transactionDefaultOptions);
    updateTransaction();
}

MultiVersionTransaction::~MultiVersionTransaction() {
    timeoutTsav->trySendError(transaction_cancelled());
}

bool MultiVersionTransaction::isValid() {
    auto tr = getTransaction();
    return tr.transaction.isValid();
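
The machinery added in this hunk follows one pattern: every operation issued while there is no underlying transaction shares a single promise (timeoutTsav), and arming a new timeout atomically swaps out and cancels the previous timer, so only the most recently set timeout can ever fire. A reduced sketch of the same shape in standard C++ (ThreadSingleAssignmentVar, ThreadSpinLock, and the flow scheduler are FDB machinery; std::promise, a mutex, and a detached thread stand in for them here, and the slot is assumed to outlive any armed timer):

    #include <atomic>
    #include <chrono>
    #include <future>
    #include <memory>
    #include <mutex>
    #include <thread>
    #include <utility>

    // One shared "timed out" promise per transaction; re-arming cancels the old timer.
    class TimeoutSlot {
    public:
        TimeoutSlot() : timedOutFuture(timedOut.get_future().share()) {}

        // Every operation that lacks an underlying transaction waits on this shared future.
        std::shared_future<void> future() const { return timedOutFuture; }

        void setTimeout(std::chrono::milliseconds d) {
            auto armed = std::make_shared<std::atomic<bool>>(true);
            std::shared_ptr<std::atomic<bool>> prev;
            {
                std::lock_guard<std::mutex> g(lock);
                prev = std::exchange(current, armed);
            }
            if (prev)
                prev->store(false); // cancel the previously armed timer, as the hunk does with prevTimeout.cancel()

            std::thread([this, armed, d] {
                std::this_thread::sleep_for(d);
                std::lock_guard<std::mutex> g(lock);
                if (armed->load() && !fired) {
                    fired = true;
                    timedOut.set_value(); // every shared waiter observes the timeout at once
                }
            }).detach();
        }

    private:
        std::mutex lock;
        std::promise<void> timedOut;
        std::shared_future<void> timedOutFuture;
        bool fired = false;
        std::shared_ptr<std::atomic<bool>> current;
    };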
@@ -1896,8 +1982,28 @@ void MultiVersionApi::loadEnvironmentVariableNetworkOptions() {
            std::string valueStr;
            try {
                if (platform::getEnvironmentVar(("FDB_NETWORK_OPTION_" + option.second.name).c_str(), valueStr)) {
                    FDBOptionInfo::ParamType curParamType = option.second.paramType;
                    for (auto value : parseOptionValues(valueStr)) {
                        Standalone<StringRef> currentValue = StringRef(value);
                        Standalone<StringRef> currentValue;
                        int64_t intParamVal;
                        if (curParamType == FDBOptionInfo::ParamType::Int) {
                            try {
                                size_t nextIdx;
                                intParamVal = std::stoll(value, &nextIdx);
                                if (nextIdx != value.length()) {
                                    throw invalid_option_value();
                                }
                            } catch (const std::exception& e) {
                                TraceEvent(SevError, "EnvironmentVariableParseIntegerFailed")
                                    .detail("Option", option.second.name)
                                    .detail("Value", valueStr)
                                    .detail("Error", e.what());
                                throw invalid_option_value();
                            }
                            currentValue = StringRef(reinterpret_cast<uint8_t*>(&intParamVal), 8);
                        } else {
                            currentValue = StringRef(value);
                        }
                        { // lock scope
                            MutexHolder holder(lock);
                            if (setEnvOptions[option.first].count(currentValue) == 0) {
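
The std::stoll/nextIdx pairing above is the standard idiom for rejecting trailing garbage: stoll parses a leading integer and reports how many characters it consumed, so a value like "123abc" is treated as invalid rather than silently truncated to 123. The same check, isolated:

    #include <cstdint>
    #include <optional>
    #include <string>

    // Parse an entire string as a signed 64-bit integer; reject partial matches such as "123abc".
    std::optional<int64_t> parseWholeInt(const std::string& value) {
        try {
            size_t nextIdx = 0;
            int64_t parsed = std::stoll(value, &nextIdx);
            if (nextIdx != value.length())
                return std::nullopt; // trailing, unparsed characters
            return parsed;
        } catch (const std::exception&) {
            return std::nullopt; // empty, non-numeric, or out-of-range input
        }
    }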
@@ -334,6 +334,8 @@ public:
    MultiVersionTransaction(Reference<MultiVersionDatabase> db,
                            UniqueOrderedOptionList<FDBTransactionOptions> defaultOptions);

    ~MultiVersionTransaction() override;

    void cancel() override;
    void setVersion(Version v) override;
    ThreadFuture<Version> getReadVersion() override;

@@ -400,6 +402,29 @@ private:
        ThreadFuture<Void> onChange;
    };

    // Timeout-related variables for MultiVersionTransaction objects that do not have an underlying ITransaction

    // The time when the MultiVersionTransaction was last created or reset
    std::atomic<double> startTime;

    // A lock that needs to be held if using timeoutTsav or currentTimeout
    ThreadSpinLock timeoutLock;

    // A single assignment var (i.e. promise) that gets set with an error when the timeout elapses or the transaction
    // is reset or destroyed.
    Reference<ThreadSingleAssignmentVar<Void>> timeoutTsav;

    // A reference to the current actor waiting for the timeout. This actor will set the timeoutTsav promise.
    ThreadFuture<Void> currentTimeout;

    // Configure a timeout based on the options set for this transaction. This timeout only applies
    // if we don't have an underlying database object to connect with.
    void setTimeout(Optional<StringRef> value);

    // Creates a ThreadFuture<T> that will signal an error if the transaction times out.
    template <class T>
    ThreadFuture<T> makeTimeout();

    TransactionInfo transaction;

    TransactionInfo getTransaction();
@@ -55,6 +55,7 @@
#include "fdbclient/SystemData.h"
#include "fdbclient/TransactionLineage.h"
#include "fdbclient/versions.h"
#include "fdbclient/WellKnownEndpoints.h"
#include "fdbrpc/LoadBalance.h"
#include "fdbrpc/Net2FileSystem.h"
#include "fdbrpc/simulator.h"

@@ -85,10 +86,6 @@

extern const char* getSourceVersion();

using std::max;
using std::min;
using std::pair;

namespace {

TransactionLineageCollector transactionLineageCollector;
@@ -674,8 +671,8 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext* cx) {

ACTOR static Future<Void> monitorProxiesChange(Reference<AsyncVar<ClientDBInfo> const> clientDBInfo,
                                               AsyncTrigger* triggerVar) {
    state vector<CommitProxyInterface> curCommitProxies;
    state vector<GrvProxyInterface> curGrvProxies;
    state std::vector<CommitProxyInterface> curCommitProxies;
    state std::vector<GrvProxyInterface> curGrvProxies;
    curCommitProxies = clientDBInfo->get().commitProxies;
    curGrvProxies = clientDBInfo->get().grvProxies;

@@ -1435,7 +1432,7 @@ DatabaseContext::~DatabaseContext() {
    locationCache.insert(allKeys, Reference<LocationInfo>());
}

pair<KeyRange, Reference<LocationInfo>> DatabaseContext::getCachedLocation(const KeyRef& key, Reverse isBackward) {
std::pair<KeyRange, Reference<LocationInfo>> DatabaseContext::getCachedLocation(const KeyRef& key, Reverse isBackward) {
    if (isBackward) {
        auto range = locationCache.rangeContainingKeyBefore(key);
        return std::make_pair(range->range(), range->value());

@@ -1446,7 +1443,7 @@ pair<KeyRange, Reference<LocationInfo>> DatabaseContext::getCachedLocation(const
}

bool DatabaseContext::getCachedLocations(const KeyRangeRef& range,
                                         vector<std::pair<KeyRange, Reference<LocationInfo>>>& result,
                                         std::vector<std::pair<KeyRange, Reference<LocationInfo>>>& result,
                                         int limit,
                                         Reverse reverse) {
    result.clear();

@@ -1476,8 +1473,8 @@ bool DatabaseContext::getCachedLocations(const KeyRangeRef& range,
}

Reference<LocationInfo> DatabaseContext::setCachedLocation(const KeyRangeRef& keys,
                                                           const vector<StorageServerInterface>& servers) {
    vector<Reference<ReferencedInterface<StorageServerInterface>>> serverRefs;
                                                           const std::vector<StorageServerInterface>& servers) {
    std::vector<Reference<ReferencedInterface<StorageServerInterface>>> serverRefs;
    serverRefs.reserve(servers.size());
    for (const auto& interf : servers) {
        serverRefs.push_back(StorageServerInfo::getInterface(this, interf, clientLocality));

@@ -2068,7 +2065,7 @@ void setupNetwork(uint64_t transportId, UseMetrics useMetrics) {
    g_network = newNet2(tlsConfig, false, useMetrics || networkOptions.traceDirectory.present());
    g_network->addStopCallback(Net2FileSystem::stop);
    g_network->addStopCallback(TLS::DestroyOpenSSLGlobalState);
    FlowTransport::createInstance(true, transportId);
    FlowTransport::createInstance(true, transportId, WLTOKEN_RESERVED_COUNT);
    Net2FileSystem::newFileSystem();

    uncancellable(monitorNetworkBusyness());
@@ -2251,23 +2248,23 @@ ACTOR Future<Optional<StorageServerInterface>> fetchServerInterface(Database cx,
    return decodeServerListValue(val.get());
}

ACTOR Future<Optional<vector<StorageServerInterface>>> transactionalGetServerInterfaces(Future<Version> ver,
                                                                                        Database cx,
                                                                                        TransactionInfo info,
                                                                                        vector<UID> ids,
                                                                                        TagSet tags) {
    state vector<Future<Optional<StorageServerInterface>>> serverListEntries;
ACTOR Future<Optional<std::vector<StorageServerInterface>>> transactionalGetServerInterfaces(Future<Version> ver,
                                                                                             Database cx,
                                                                                             TransactionInfo info,
                                                                                             std::vector<UID> ids,
                                                                                             TagSet tags) {
    state std::vector<Future<Optional<StorageServerInterface>>> serverListEntries;
    serverListEntries.reserve(ids.size());
    for (int s = 0; s < ids.size(); s++) {
        serverListEntries.push_back(fetchServerInterface(cx, info, ids[s], tags, ver));
    }

    vector<Optional<StorageServerInterface>> serverListValues = wait(getAll(serverListEntries));
    vector<StorageServerInterface> serverInterfaces;
    std::vector<Optional<StorageServerInterface>> serverListValues = wait(getAll(serverListEntries));
    std::vector<StorageServerInterface> serverInterfaces;
    for (int s = 0; s < serverListValues.size(); s++) {
        if (!serverListValues[s].present()) {
            // A storage server has been removed from ServerList since we read keyServers
            return Optional<vector<StorageServerInterface>>();
            return Optional<std::vector<StorageServerInterface>>();
        }
        serverInterfaces.push_back(serverListValues[s].get());
    }

@@ -2299,10 +2296,8 @@ void updateTssMappings(Database cx, const GetKeyServerLocationsReply& reply) {

// If isBackward == true, returns the shard containing the key before 'key' (an infinitely long, inexpressible key).
// Otherwise returns the shard containing key
ACTOR Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation_internal(Database cx,
                                                                              Key key,
                                                                              TransactionInfo info,
                                                                              Reverse isBackward = Reverse::False) {
ACTOR Future<std::pair<KeyRange, Reference<LocationInfo>>>
getKeyLocation_internal(Database cx, Key key, TransactionInfo info, Reverse isBackward = Reverse::False) {
    state Span span("NAPI:getKeyLocation"_loc, info.spanID);
    if (isBackward) {
        ASSERT(key != allKeys.begin && key <= allKeys.end);
@@ -2337,11 +2332,11 @@ ACTOR Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation_internal(Da
}

template <class F>
Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation(Database const& cx,
                                                               Key const& key,
                                                               F StorageServerInterface::*member,
                                                               TransactionInfo const& info,
                                                               Reverse isBackward = Reverse::False) {
Future<std::pair<KeyRange, Reference<LocationInfo>>> getKeyLocation(Database const& cx,
                                                                    Key const& key,
                                                                    F StorageServerInterface::*member,
                                                                    TransactionInfo const& info,
                                                                    Reverse isBackward = Reverse::False) {
    // we first check whether this range is cached
    auto ssi = cx->getCachedLocation(key, isBackward);
    if (!ssi.second) {

@@ -2359,11 +2354,8 @@ Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation(Database const& c
    return ssi;
}

ACTOR Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocations_internal(Database cx,
                                                                                            KeyRange keys,
                                                                                            int limit,
                                                                                            Reverse reverse,
                                                                                            TransactionInfo info) {
ACTOR Future<std::vector<std::pair<KeyRange, Reference<LocationInfo>>>>
getKeyRangeLocations_internal(Database cx, KeyRange keys, int limit, Reverse reverse, TransactionInfo info) {
    state Span span("NAPI:getKeyRangeLocations"_loc, info.spanID);
    if (info.debugID.present())
        g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocations.Before");

@@ -2384,7 +2376,7 @@ ACTOR Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocatio
            "TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocations.After");
        ASSERT(rep.results.size());

        state vector<pair<KeyRange, Reference<LocationInfo>>> results;
        state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> results;
        state int shard = 0;
        for (; shard < rep.results.size(); shard++) {
            // FIXME: these shards are being inserted into the map sequentially, it would be much more CPU

@@ -2408,15 +2400,16 @@ ACTOR Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocatio
// Example: If the function is queried with key range (b, d), the returned list of pairs could be something like:
// [([a, b1), locationInfo), ([b1, c), locationInfo), ([c, d1), locationInfo)].
template <class F>
Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocations(Database const& cx,
                                                                             KeyRange const& keys,
                                                                             int limit,
                                                                             Reverse reverse,
                                                                             F StorageServerInterface::*member,
                                                                             TransactionInfo const& info) {
Future<std::vector<std::pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocations(
    Database const& cx,
    KeyRange const& keys,
    int limit,
    Reverse reverse,
    F StorageServerInterface::*member,
    TransactionInfo const& info) {
    ASSERT(!keys.empty());

    vector<pair<KeyRange, Reference<LocationInfo>>> locations;
    std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations;
    if (!cx->getCachedLocations(keys, locations, limit, reverse)) {
        return getKeyRangeLocations_internal(cx, keys, limit, reverse, info);
    }
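
getKeyRangeLocations above is a read-through cache: serve the answer from the client's shard-location cache when it covers every requested range, otherwise fall through to the network call that repopulates it. The shape, reduced to a sketch with stand-in types (Database, LocationInfo, and the proxy RPC are FDB's; everything here is illustrative):

    #include <future>
    #include <map>
    #include <optional>
    #include <string>
    #include <vector>

    using Location = std::string; // stand-in for Reference<LocationInfo>
    using Fetch = std::vector<Location> (*)(const std::vector<std::string>&);

    struct LocationCache {
        std::map<std::string, Location> byKey;

        // Returns cached locations, or nullopt if any key misses the cache.
        std::optional<std::vector<Location>> lookup(const std::vector<std::string>& keys) const {
            std::vector<Location> out;
            for (const auto& k : keys) {
                auto it = byKey.find(k);
                if (it == byKey.end())
                    return std::nullopt;
                out.push_back(it->second);
            }
            return out;
        }
    };

    std::future<std::vector<Location>> getLocations(LocationCache& cache,
                                                    const std::vector<std::string>& keys,
                                                    Fetch fetchRemote) {
        if (auto cached = cache.lookup(keys)) {
            std::promise<std::vector<Location>> p;
            p.set_value(*cached); // fast path: an already-ready future, no network round trip
            return p.get_future();
        }
        // Slow path: ask the proxies; the real code also inserts the results into the cache.
        return std::async(std::launch::async, fetchRemote, keys);
    }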
@@ -2448,7 +2441,7 @@ ACTOR Future<Void> warmRange_impl(Transaction* self, Database cx, KeyRange keys)
    state int totalRanges = 0;
    state int totalRequests = 0;
    loop {
        vector<pair<KeyRange, Reference<LocationInfo>>> locations = wait(
        std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations = wait(
            getKeyRangeLocations_internal(cx, keys, CLIENT_KNOBS->WARM_RANGE_SHARD_LIMIT, Reverse::False, self->info));
        totalRanges += CLIENT_KNOBS->WARM_RANGE_SHARD_LIMIT;
        totalRequests++;

@@ -2493,7 +2486,7 @@ ACTOR Future<Optional<Value>> getValue(Future<Version> version,
    cx->validateVersion(ver);

    loop {
        state pair<KeyRange, Reference<LocationInfo>> ssi =
        state std::pair<KeyRange, Reference<LocationInfo>> ssi =
            wait(getKeyLocation(cx, key, &StorageServerInterface::getValue, info));
        state Optional<UID> getValueID = Optional<UID>();
        state uint64_t startTime;

@@ -2618,7 +2611,7 @@ ACTOR Future<Key> getKey(Database cx, KeySelector k, Future<Version> version, Tr
    }

    Key locationKey(k.getKey(), k.arena());
    state pair<KeyRange, Reference<LocationInfo>> ssi =
    state std::pair<KeyRange, Reference<LocationInfo>> ssi =
        wait(getKeyLocation(cx, locationKey, &StorageServerInterface::getKey, info, Reverse{ k.isBackward() }));

    try {

@@ -2739,7 +2732,7 @@ ACTOR Future<Version> watchValue(Future<Version> version,
    ASSERT(ver != latestVersion);

    loop {
        state pair<KeyRange, Reference<LocationInfo>> ssi =
        state std::pair<KeyRange, Reference<LocationInfo>> ssi =
            wait(getKeyLocation(cx, key, &StorageServerInterface::watchValue, info));

        try {

@@ -2992,7 +2985,7 @@ ACTOR Future<RangeResult> getExactRange(Database cx,

    // printf("getExactRange( '%s', '%s' )\n", keys.begin.toString().c_str(), keys.end.toString().c_str());
    loop {
        state vector<pair<KeyRange, Reference<LocationInfo>>> locations = wait(getKeyRangeLocations(
        state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations = wait(getKeyRangeLocations(
            cx, keys, CLIENT_KNOBS->GET_RANGE_SHARD_LIMIT, reverse, &StorageServerInterface::getKeyValues, info));
        ASSERT(locations.size());
        state int shard = 0;
@@ -3315,7 +3308,7 @@ ACTOR Future<RangeResult> getRange(Database cx,

    Key locationKey = reverse ? Key(end.getKey(), end.arena()) : Key(begin.getKey(), begin.arena());
    Reverse locationBackward{ reverse ? (end - 1).isBackward() : begin.isBackward() };
    state pair<KeyRange, Reference<LocationInfo>> beginServer =
    state std::pair<KeyRange, Reference<LocationInfo>> beginServer =
        wait(getKeyLocation(cx, locationKey, &StorageServerInterface::getKeyValues, info, locationBackward));
    state KeyRange shard = beginServer.first;
    state bool modifiedSelectors = false;

@@ -3745,7 +3738,7 @@ ACTOR Future<Void> getRangeStreamFragment(ParallelStream<RangeResult>::Fragment*
                                          TagSet tags,
                                          SpanID spanContext) {
    loop {
        state vector<pair<KeyRange, Reference<LocationInfo>>> locations = wait(getKeyRangeLocations(
        state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations = wait(getKeyRangeLocations(
            cx, keys, CLIENT_KNOBS->GET_RANGE_SHARD_LIMIT, reverse, &StorageServerInterface::getKeyValuesStream, info));
        ASSERT(locations.size());
        state int shard = 0;

@@ -3806,7 +3799,7 @@ ACTOR Future<Void> getRangeStreamFragment(ParallelStream<RangeResult>::Fragment*
            break;
        }

        vector<Future<Void>> ok(locations[shard].second->size());
        std::vector<Future<Void>> ok(locations[shard].second->size());
        for (int i = 0; i < ok.size(); i++) {
            ok[i] = IFailureMonitor::failureMonitor().onStateEqual(
                locations[shard].second->get(i, &StorageServerInterface::getKeyValuesStream).getEndpoint(),

@@ -4048,7 +4041,7 @@ ACTOR Future<Void> getRangeStream(PromiseStream<RangeResult> _results,

    state std::vector<Future<Void>> outstandingRequests;
    while (b < e) {
        state pair<KeyRange, Reference<LocationInfo>> ssi =
        state std::pair<KeyRange, Reference<LocationInfo>> ssi =
            wait(getKeyLocation(cx, reverse ? e : b, &StorageServerInterface::getKeyValuesStream, info, reverse));
        state KeyRange shardIntersection = intersect(ssi.first, KeyRangeRef(b, e));
        state Standalone<VectorRef<KeyRef>> splitPoints =
@@ -4309,7 +4302,7 @@ ACTOR Future<Standalone<VectorRef<const char*>>> getAddressesForKeyActor(Key key
                                                                         Database cx,
                                                                         TransactionInfo info,
                                                                         TransactionOptions options) {
    state vector<StorageServerInterface> ssi;
    state std::vector<StorageServerInterface> ssi;

    // If key >= allKeys.end, then getRange will return a kv-pair with an empty value. This will result in our
    // serverInterfaces vector being empty, which will cause us to return an empty addresses list.

@@ -4336,12 +4329,12 @@ ACTOR Future<Standalone<VectorRef<const char*>>> getAddressesForKeyActor(Key key

    ASSERT(serverUids.size()); // every shard needs to have a team

    vector<UID> src;
    vector<UID> ignore; // 'ignore' is so named because it is the vector into which we decode the 'dest' servers in the
                        // case where this key is being relocated. But 'src' is the canonical location until the move is
                        // finished, because it could be cancelled at any time.
    std::vector<UID> src;
    std::vector<UID> ignore; // 'ignore' is so named because it is the vector into which we decode the 'dest' servers in
                             // the case where this key is being relocated. But 'src' is the canonical location until
                             // the move is finished, because it could be cancelled at any time.
    decodeKeyServersValue(serverTagResult, serverUids[0].value, src, ignore);
    Optional<vector<StorageServerInterface>> serverInterfaces =
    Optional<std::vector<StorageServerInterface>> serverInterfaces =
        wait(transactionalGetServerInterfaces(ver, cx, info, src, options.readTags));

    ASSERT(serverInterfaces.present()); // since this is happening transactionally, /FF/keyServers and /FF/serverList

@@ -4969,7 +4962,7 @@ ACTOR Future<Optional<ClientTrCommitCostEstimation>> estimateCommitCosts(Transac
            ++trCommitCosts.expensiveCostEstCount;
            ++self->getDatabase()->transactionsExpensiveClearCostEstCount;
        } else {
            std::vector<pair<KeyRange, Reference<LocationInfo>>> locations =
            std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
                wait(getKeyRangeLocations(self->getDatabase(),
                                          keyRange,
                                          CLIENT_KNOBS->TOO_MANY,
@@ -5616,7 +5609,7 @@ ACTOR Future<Void> readVersionBatcher(DatabaseContext* cx,
            // dynamic batching monitors reply latencies
            when(double reply_latency = waitNext(replyTimes.getFuture())) {
                double target_latency = reply_latency * 0.5;
                batchTime = min(0.1 * target_latency + 0.9 * batchTime, CLIENT_KNOBS->GRV_BATCH_TIMEOUT);
                batchTime = std::min(0.1 * target_latency + 0.9 * batchTime, CLIENT_KNOBS->GRV_BATCH_TIMEOUT);
            }
            when(wait(collection)) {} // for errors
        }
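
The qualified std::min above sits inside the GRV batcher's dynamic batch sizing, which is an exponentially weighted moving average: each observed reply latency pulls batchTime toward half that latency with weight 0.1, capped at GRV_BATCH_TIMEOUT. For instance, with batchTime = 4 ms and a 10 ms reply, the target is 5 ms and the new batch time is 0.1 * 5 + 0.9 * 4 = 4.1 ms. Isolated:

    #include <algorithm>

    // EWMA update used by the read-version batcher: drift toward half the
    // observed reply latency, never exceeding the configured cap.
    double updateBatchTime(double batchTime, double replyLatency, double cap) {
        double target = replyLatency * 0.5;
        return std::min(0.1 * target + 0.9 * batchTime, cap);
    }
    // updateBatchTime(0.004, 0.010, 0.1) == 0.1 * 0.005 + 0.9 * 0.004 == 0.0041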
@@ -5808,7 +5801,8 @@ Future<Standalone<StringRef>> Transaction::getVersionstamp() {

// Gets the protocol version reported by a coordinator via the protocol info interface
ACTOR Future<ProtocolVersion> getCoordinatorProtocol(NetworkAddressList coordinatorAddresses) {
    RequestStream<ProtocolInfoRequest> requestStream{ Endpoint{ { coordinatorAddresses }, WLTOKEN_PROTOCOL_INFO } };
    RequestStream<ProtocolInfoRequest> requestStream{ Endpoint::wellKnown({ coordinatorAddresses },
                                                                          WLTOKEN_PROTOCOL_INFO) };
    ProtocolInfoReply reply = wait(retryBrokenPromise(requestStream, ProtocolInfoRequest{}));

    return reply.version;

@@ -5969,7 +5963,7 @@ ACTOR Future<StorageMetrics> doGetStorageMetrics(Database cx, KeyRange keys, Ref

ACTOR Future<StorageMetrics> getStorageMetricsLargeKeyRange(Database cx, KeyRange keys) {
    state Span span("NAPI:GetStorageMetricsLargeKeyRange"_loc);
    vector<pair<KeyRange, Reference<LocationInfo>>> locations =
    std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
        wait(getKeyRangeLocations(cx,
                                  keys,
                                  std::numeric_limits<int>::max(),

@@ -5977,7 +5971,7 @@ ACTOR Future<StorageMetrics> getStorageMetricsLargeKeyRange(Database cx, KeyRang
                                  &StorageServerInterface::waitMetrics,
                                  TransactionInfo(TaskPriority::DataDistribution, span.context)));
    state int nLocs = locations.size();
    state vector<Future<StorageMetrics>> fx(nLocs);
    state std::vector<Future<StorageMetrics>> fx(nLocs);
    state StorageMetrics total;
    KeyRef partBegin, partEnd;
    for (int i = 0; i < nLocs; i++) {

@@ -6011,15 +6005,15 @@ ACTOR Future<Void> trackBoundedStorageMetrics(KeyRange keys,
}

ACTOR Future<StorageMetrics> waitStorageMetricsMultipleLocations(
    vector<pair<KeyRange, Reference<LocationInfo>>> locations,
    std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations,
    StorageMetrics min,
    StorageMetrics max,
    StorageMetrics permittedError) {
    state int nLocs = locations.size();
    state vector<Future<StorageMetrics>> fx(nLocs);
    state std::vector<Future<StorageMetrics>> fx(nLocs);
    state StorageMetrics total;
    state PromiseStream<StorageMetrics> deltas;
    state vector<Future<Void>> wx(fx.size());
    state std::vector<Future<Void>> wx(fx.size());
    state StorageMetrics halfErrorPerMachine = permittedError * (0.5 / nLocs);
    state StorageMetrics maxPlus = max + halfErrorPerMachine * (nLocs - 1);
    state StorageMetrics minMinus = min - halfErrorPerMachine * (nLocs - 1);
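
The three halfErrorPerMachine lines at the end of that hunk split a global error budget across per-shard waits: each of the nLocs shard-level waiters gets half the budget divided evenly, and the global min/max bounds are widened by the error the other (nLocs - 1) waiters may each contribute, so summed per-shard drift cannot cause spurious triggers. A sketch of the arithmetic with plain doubles in place of StorageMetrics:

    // Splitting a global error budget across n shard-level waits, as in
    // waitStorageMetricsMultipleLocations above.
    struct ErrorSplit {
        double perShardError; // tolerance granted to each per-shard wait
        double maxPlus;       // widened global upper bound
        double minMinus;      // widened global lower bound
    };

    ErrorSplit splitErrorBudget(double permittedError, int nLocs, double minBound, double maxBound) {
        double half = permittedError * (0.5 / nLocs);
        return { half, maxBound + half * (nLocs - 1), minBound - half * (nLocs - 1) };
    }
    // e.g. permittedError = 100 and nLocs = 5: each shard tracks to within 10,
    // and the global bounds widen by 40 to absorb the other shards' slack.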
@@ -6068,7 +6062,7 @@ ACTOR Future<Standalone<VectorRef<ReadHotRangeWithMetrics>>> getReadHotRanges(Da
    loop {
        int64_t shardLimit = 100; // Shard limit here does not really matter since this function is currently only used
                                  // to find the read-hot sub ranges within a read-hot shard.
        vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
            wait(getKeyRangeLocations(cx,
                                      keys,
                                      shardLimit,

@@ -6087,7 +6081,7 @@ ACTOR Future<Standalone<VectorRef<ReadHotRangeWithMetrics>>> getReadHotRanges(Da
        //	    .detail("KeysBegin", keys.begin.printable().c_str())
        //	    .detail("KeysEnd", keys.end.printable().c_str());
        // }
        state vector<Future<ReadHotSubRangeReply>> fReplies(nLocs);
        state std::vector<Future<ReadHotSubRangeReply>> fReplies(nLocs);
        KeyRef partBegin, partEnd;
        for (int i = 0; i < nLocs; i++) {
            partBegin = (i == 0) ? keys.begin : locations[i].first.begin;

@@ -6136,7 +6130,7 @@ ACTOR Future<std::pair<Optional<StorageMetrics>, int>> waitStorageMetrics(Databa
                                                                          int expectedShardCount) {
    state Span span("NAPI:WaitStorageMetrics"_loc);
    loop {
        vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
            wait(getKeyRangeLocations(cx,
                                      keys,
                                      shardLimit,

@@ -6228,7 +6222,7 @@ Future<Standalone<VectorRef<ReadHotRangeWithMetrics>>> Transaction::getReadHotRa
ACTOR Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(Database cx, KeyRange keys, int64_t chunkSize) {
    state Span span("NAPI:GetRangeSplitPoints"_loc);
    loop {
        state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
            wait(getKeyRangeLocations(cx,
                                      keys,
                                      CLIENT_KNOBS->TOO_MANY,

@@ -6237,7 +6231,7 @@ ACTOR Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(Database cx, Key
                                      TransactionInfo(TaskPriority::DataDistribution, span.context)));
        try {
            state int nLocs = locations.size();
            state vector<Future<SplitRangeReply>> fReplies(nLocs);
            state std::vector<Future<SplitRangeReply>> fReplies(nLocs);
            KeyRef partBegin, partEnd;
            for (int i = 0; i < nLocs; i++) {
                partBegin = (i == 0) ? keys.begin : locations[i].first.begin;

@@ -6289,7 +6283,7 @@ ACTOR Future<Standalone<VectorRef<KeyRef>>> splitStorageMetrics(Database cx,
                                                                StorageMetrics estimated) {
    state Span span("NAPI:SplitStorageMetrics"_loc);
    loop {
        state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
            wait(getKeyRangeLocations(cx,
                                      keys,
                                      CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT,

@@ -6412,7 +6406,7 @@ ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID sn
    }
}

ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exclusions) {
ACTOR Future<bool> checkSafeExclusions(Database cx, std::vector<AddressExclusion> exclusions) {
    TraceEvent("ExclusionSafetyCheckBegin")
        .detail("NumExclusion", exclusions.size())
        .detail("Exclusions", describe(exclusions));
@@ -6443,7 +6437,7 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
    }
    TraceEvent("ExclusionSafetyCheckCoordinators").log();
    state ClientCoordinators coordinatorList(cx->getConnectionFile());
    state vector<Future<Optional<LeaderInfo>>> leaderServers;
    state std::vector<Future<Optional<LeaderInfo>>> leaderServers;
    leaderServers.reserve(coordinatorList.clientLeaderServers.size());
    for (int i = 0; i < coordinatorList.clientLeaderServers.size(); i++) {
        leaderServers.push_back(retryBrokenPromise(coordinatorList.clientLeaderServers[i].getLeader,

@@ -411,7 +411,7 @@ public:
    void setTransactionID(uint64_t id);
    void setToken(uint64_t token);

    const vector<Future<std::pair<Key, Key>>>& getExtraReadConflictRanges() const { return extraConflictRanges; }
    const std::vector<Future<std::pair<Key, Key>>>& getExtraReadConflictRanges() const { return extraConflictRanges; }
    Standalone<VectorRef<KeyRangeRef>> readConflictRanges() const {
        return Standalone<VectorRef<KeyRangeRef>>(tr.transaction.read_conflict_ranges, tr.arena);
    }

@@ -428,7 +428,7 @@ private:
    CommitTransactionRequest tr;
    Future<Version> readVersion;
    Promise<Optional<Value>> metadataVersion;
    vector<Future<std::pair<Key, Key>>> extraConflictRanges;
    std::vector<Future<std::pair<Key, Key>>> extraConflictRanges;
    Promise<Void> commitResult;
    Future<Void> committing;
};
@@ -449,7 +449,7 @@ int64_t extractIntOption(Optional<StringRef> value,
ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID);

// Checks with Data Distributor that it is safe to mark all servers in exclusions as failed
ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exclusions);
ACTOR Future<bool> checkSafeExclusions(Database cx, std::vector<AddressExclusion> exclusions);

inline uint64_t getWriteOperationCost(uint64_t bytes) {
    return bytes / std::max(1, CLIENT_KNOBS->WRITE_COST_BYTE_FACTOR) + 1;

@@ -21,8 +21,7 @@
#include "fdbclient/AnnotateActor.h"
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"

constexpr UID WLTOKEN_PROCESS(-1, 21);
#include "fdbclient/WellKnownEndpoints.h"

struct ProcessInterface {
    constexpr static FileIdentifier file_identifier = 985636;
@@ -1342,7 +1342,7 @@ ACTOR Future<RangeResult> getWorkerInterfaces(Reference<ClusterConnectionFile> c

    loop {
        choose {
            when(vector<ClientWorkerInterface> workers =
            when(std::vector<ClientWorkerInterface> workers =
                     wait(clusterInterface->get().present()
                              ? brokenPromiseToNever(
                                    clusterInterface->get().get().getClientWorkers.getReply(GetClientWorkersRequest()))

@@ -1145,7 +1145,7 @@ Future<RangeResult> ExclusionInProgressRangeImpl::getRange(ReadYourWritesTransac
}

ACTOR Future<RangeResult> getProcessClassActor(ReadYourWritesTransaction* ryw, KeyRef prefix, KeyRangeRef kr) {
    vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
    std::vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
    auto workers = _workers; // strip const
    // Note: sorting by the string representation is counterintuitive, e.g. 1.1.1.1:11 < 1.1.1.1:5
    std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) {

@@ -1168,7 +1168,7 @@ ACTOR Future<Optional<std::string>> processClassCommitActor(ReadYourWritesTransa
    ryw->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
    ryw->setOption(FDBTransactionOptions::LOCK_AWARE);
    ryw->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
    vector<ProcessData> workers = wait(
    std::vector<ProcessData> workers = wait(
        getWorkers(&ryw->getTransaction())); // make sure we use the Transaction object to avoid used_during_commit()

    auto ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(range);

@@ -1259,7 +1259,7 @@ void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef&
}

ACTOR Future<RangeResult> getProcessClassSourceActor(ReadYourWritesTransaction* ryw, KeyRef prefix, KeyRangeRef kr) {
    vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
    std::vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
    auto workers = _workers; // strip const
    // Note: sorting by the string representation is counterintuitive, e.g. 1.1.1.1:11 < 1.1.1.1:5
    std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) {
@@ -2092,7 +2092,7 @@ ACTOR static Future<RangeResult> actorLineageGetRangeActor(ReadYourWritesTransac
    // Open endpoint to target process on each call. This can be optimized at
    // some point...
    state ProcessInterface process;
    process.getInterface = RequestStream<GetProcessInterfaceRequest>(Endpoint({ host }, WLTOKEN_PROCESS));
    process.getInterface = RequestStream<GetProcessInterfaceRequest>(Endpoint::wellKnown({ host }, WLTOKEN_PROCESS));
    ProcessInterface p = wait(retryBrokenPromise(process.getInterface, GetProcessInterfaceRequest{}));
    process = p;

@@ -309,18 +309,18 @@ ACTOR Future<Optional<StatusObject>> clientCoordinatorsStatusFetcher(Reference<C
    state ClientCoordinators coord(f);
    state StatusObject statusObj;

    state vector<Future<Optional<LeaderInfo>>> leaderServers;
    state std::vector<Future<Optional<LeaderInfo>>> leaderServers;
    leaderServers.reserve(coord.clientLeaderServers.size());
    for (int i = 0; i < coord.clientLeaderServers.size(); i++)
        leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader,
                                                   GetLeaderRequest(coord.clusterKey, UID()),
                                                   TaskPriority::CoordinationReply));

    state vector<Future<ProtocolInfoReply>> coordProtocols;
    state std::vector<Future<ProtocolInfoReply>> coordProtocols;
    coordProtocols.reserve(coord.clientLeaderServers.size());
    for (int i = 0; i < coord.clientLeaderServers.size(); i++) {
        RequestStream<ProtocolInfoRequest> requestStream{ Endpoint{
            { coord.clientLeaderServers[i].getLeader.getEndpoint().addresses }, WLTOKEN_PROTOCOL_INFO } };
        RequestStream<ProtocolInfoRequest> requestStream{ Endpoint::wellKnown(
            { coord.clientLeaderServers[i].getLeader.getEndpoint().addresses }, WLTOKEN_PROTOCOL_INFO) };
        coordProtocols.push_back(retryBrokenPromise(requestStream, ProtocolInfoRequest{}));
    }
@@ -313,7 +313,14 @@ struct GetKeyValuesStreamReply : public ReplyPromiseStreamReply {

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, data, version, more, cached, arena);
        serializer(ar,
                   ReplyPromiseStreamReply::acknowledgeToken,
                   ReplyPromiseStreamReply::sequence,
                   data,
                   version,
                   more,
                   cached,
                   arena);
    }
};
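
Adding ReplyPromiseStreamReply::sequence to the serializer puts each streamed batch's ordinal on the wire, which is what lets the receiving end reassemble batches in order even when they arrive shuffled. A minimal reordering buffer of that kind (a sketch, not FDB's ReplyPromiseStream internals):

    #include <cstdint>
    #include <map>
    #include <vector>

    // Deliver stream replies in sequence order, buffering any that arrive early.
    template <class Reply>
    class SequencedDelivery {
    public:
        // Accepts one reply and returns every reply that becomes deliverable as a result.
        std::vector<Reply> accept(uint64_t sequence, Reply r) {
            pending.emplace(sequence, std::move(r));
            std::vector<Reply> ready;
            auto it = pending.find(next);
            while (it != pending.end()) {
                ready.push_back(std::move(it->second));
                pending.erase(it);
                ++next;
                it = pending.find(next);
            }
            return ready;
        }

    private:
        uint64_t next = 0;
        std::map<uint64_t, Reply> pending;
    };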
@@ -242,13 +242,13 @@ const Key storageCacheKey(const KeyRef& k) {
    return k.withPrefix(storageCachePrefix);
}

const Value storageCacheValue(const vector<uint16_t>& serverIndices) {
const Value storageCacheValue(const std::vector<uint16_t>& serverIndices) {
    BinaryWriter wr((IncludeVersion(ProtocolVersion::withStorageCacheValue())));
    wr << serverIndices;
    return wr.toValue();
}

void decodeStorageCacheValue(const ValueRef& value, vector<uint16_t>& serverIndices) {
void decodeStorageCacheValue(const ValueRef& value, std::vector<uint16_t>& serverIndices) {
    serverIndices.clear();
    if (value.size()) {
        BinaryReader rd(value, IncludeVersion());

@@ -256,17 +256,17 @@ void decodeStorageCacheValue(const ValueRef& value, vector<uint16_t>& serverIndi
    }
}

const Value logsValue(const vector<std::pair<UID, NetworkAddress>>& logs,
                      const vector<std::pair<UID, NetworkAddress>>& oldLogs) {
const Value logsValue(const std::vector<std::pair<UID, NetworkAddress>>& logs,
                      const std::vector<std::pair<UID, NetworkAddress>>& oldLogs) {
    BinaryWriter wr(IncludeVersion(ProtocolVersion::withLogsValue()));
    wr << logs;
    wr << oldLogs;
    return wr.toValue();
}
std::pair<vector<std::pair<UID, NetworkAddress>>, vector<std::pair<UID, NetworkAddress>>> decodeLogsValue(
std::pair<std::vector<std::pair<UID, NetworkAddress>>, std::vector<std::pair<UID, NetworkAddress>>> decodeLogsValue(
    const ValueRef& value) {
    vector<std::pair<UID, NetworkAddress>> logs;
    vector<std::pair<UID, NetworkAddress>> oldLogs;
    std::vector<std::pair<UID, NetworkAddress>> logs;
    std::vector<std::pair<UID, NetworkAddress>> oldLogs;
    BinaryReader reader(value, IncludeVersion());
    reader >> logs;
    reader >> oldLogs;

@@ -43,7 +43,7 @@ extern const KeyRangeRef specialKeys; // [FF][FF] to [FF][FF][FF], some client f
                                      // using these special keys, see pr#2662
extern const KeyRef afterAllKeys;

// "\xff/keyServers/[[begin]]" := "[[vector<serverID>, vector<serverID>]|[vector<Tag>, vector<Tag>]]"
// "\xff/keyServers/[[begin]]" := "[[vector<serverID>, std::vector<serverID>]|[vector<Tag>, std::vector<Tag>]]"
// An internal mapping of where shards are located in the database. [[begin]] is the start of the shard range
// and the result is a list of serverIDs or Tags where these shards are located. These values can be changed
// as data movement occurs.

@@ -330,9 +330,9 @@ extern const KeyRef logsKey;
// Used during backup/recovery to restrict version requirements
extern const KeyRef minRequiredCommitVersionKey;

const Value logsValue(const vector<std::pair<UID, NetworkAddress>>& logs,
                      const vector<std::pair<UID, NetworkAddress>>& oldLogs);
std::pair<vector<std::pair<UID, NetworkAddress>>, vector<std::pair<UID, NetworkAddress>>> decodeLogsValue(
const Value logsValue(const std::vector<std::pair<UID, NetworkAddress>>& logs,
                      const std::vector<std::pair<UID, NetworkAddress>>& oldLogs);
std::pair<std::vector<std::pair<UID, NetworkAddress>>, std::vector<std::pair<UID, NetworkAddress>>> decodeLogsValue(
    const ValueRef& value);

// The "global keys" are sent to each storage server any time they are changed
@@ -0,0 +1,54 @@
/*
 * WellKnownEndpoints.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDBCLIENT_WELLKNOWNENDPOINTS_H
#define FDBCLIENT_WELLKNOWNENDPOINTS_H
#pragma once

#include <fdbrpc/fdbrpc.h>

/*
 * All well-known endpoints of FDB must be listed here to guarantee their uniqueness
 */
enum WellKnownEndpoints {
    WLTOKEN_CLIENTLEADERREG_GETLEADER = WLTOKEN_FIRST_AVAILABLE, // 2
    WLTOKEN_CLIENTLEADERREG_OPENDATABASE, // 3
    WLTOKEN_LEADERELECTIONREG_CANDIDACY, // 4
    WLTOKEN_LEADERELECTIONREG_ELECTIONRESULT, // 5
    WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT, // 6
    WLTOKEN_LEADERELECTIONREG_FORWARD, // 7
    WLTOKEN_GENERATIONREG_READ, // 8
    WLTOKEN_GENERATIONREG_WRITE, // 9
    WLTOKEN_PROTOCOL_INFO, // 10 : the value of this endpoint should be stable and not change.
    WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE, // 11
    WLTOKEN_CONFIGTXN_GETGENERATION, // 12
    WLTOKEN_CONFIGTXN_GET, // 13
    WLTOKEN_CONFIGTXN_GETCLASSES, // 14
    WLTOKEN_CONFIGTXN_GETKNOBS, // 15
    WLTOKEN_CONFIGTXN_COMMIT, // 16
    WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES, // 17
    WLTOKEN_CONFIGFOLLOWER_GETCHANGES, // 18
    WLTOKEN_CONFIGFOLLOWER_COMPACT, // 19
    WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION, // 20
    WLTOKEN_PROCESS, // 21
    WLTOKEN_RESERVED_COUNT // 22
};

#endif
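
Collecting every token index in one enum is what makes the Endpoint::wellKnown(...) calls elsewhere in this commit work: both sides of a connection derive the same endpoint token from the same stable slot, so a request can be addressed to a remote service before any interface has been exchanged. The comment on WLTOKEN_PROTOCOL_INFO is the contract that matters most; a hedged sketch of how such stability could be pinned at compile time (illustrative only, not part of the FDB source):

    // A stable slot per service; a static_assert pins the one value the
    // comment above says must never change across versions.
    enum WellKnownEndpointsSketch {
        SKETCH_FIRST_AVAILABLE = 2,
        SKETCH_CLIENTLEADERREG_GETLEADER = SKETCH_FIRST_AVAILABLE, // 2
        // ... one slot per service, in declaration order ...
        SKETCH_PROTOCOL_INFO = 10,
    };
    static_assert(SKETCH_PROTOCOL_INFO == 10,
                  "the protocol-info token must stay stable so old and new binaries can interoperate");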
@@ -862,8 +862,7 @@ public:
    Helper function to enable support for common swap implementation pattern based on \c std::swap:
    \code
    void swap(MyClass& a, MyClass& b) {
        using std::swap;
        swap(a.value, b.value);
        std::swap(a.value, b.value);
        // ...
    }
    \endcode

@@ -2326,8 +2325,7 @@ public:
    Helper function to enable support for common swap implementation pattern based on \c std::swap:
    \code
    void swap(MyClass& a, MyClass& b) {
        using std::swap;
        swap(a.doc, b.doc);
        std::swap(a.doc, b.doc);
        // ...
    }
    \endcode
@@ -47,8 +47,9 @@ namespace vexillographer

        private static string getCInfoLine(Option o, string indent, string structName)
        {
            return String.Format("{0}ADD_OPTION_INFO({1}, {2}, \"{2}\", \"{3}\", \"{4}\", {5}, {6}, {7}, {8})",
                indent, structName, o.name.ToUpper(), o.comment, o.getParameterComment(), (o.paramDesc != null).ToString().ToLower(), o.hidden.ToString().ToLower(), o.persistent.ToString().ToLower(), o.defaultFor);
            return String.Format("{0}ADD_OPTION_INFO({1}, {2}, \"{2}\", \"{3}\", \"{4}\", {5}, {6}, {7}, {8}, FDBOptionInfo::ParamType::{9})",
                indent, structName, o.name.ToUpper(), o.comment, o.getParameterComment(), (o.paramDesc != null).ToString().ToLower(),
                o.hidden.ToString().ToLower(), o.persistent.ToString().ToLower(), o.defaultFor, o.paramType);
        }

        private static void writeCppInfo(TextWriter outFile, Scope scope, IEnumerable<Option> options)
|
@@ -22,8 +22,6 @@
#include <vector>
#include "flow/actorcompiler.h"

using std::vector;

inline void throw_operation_failed() {
    throw operation_failed();
}

@@ -31,7 +29,7 @@ inline void throw_operation_failed() {
// This is in dsltest.actor.cpp:
bool testFuzzActor(Future<int> (*actor)(FutureStream<int> const&, PromiseStream<int> const&, Future<Void> const&),
                   const char* desc,
                   vector<int> const& expectedOutput);
                   std::vector<int> const& expectedOutput);

// This is defined by ActorFuzz.actor.cpp (generated by actorFuzz.py)
// Returns (tests passed, tests total)
@@ -415,7 +415,7 @@ private:
    // results
    ACTOR Future<int> onRead(AsyncFileNonDurable* self, void* data, int length, int64_t offset) {
        wait(checkKilled(self, "Read"));
        vector<Future<Void>> priorModifications = self->getModificationsAndInsert(offset, length);
        std::vector<Future<Void>> priorModifications = self->getModificationsAndInsert(offset, length);
        wait(waitForAll(priorModifications));
        state Future<int> readFuture = self->file->read(data, length, offset);
        wait(success(readFuture) || self->killed.getFuture());

@@ -513,7 +513,7 @@ private:
        int diskPageLength = saveDurable ? length : 4096;
        int diskSectorLength = saveDurable ? length : 512;

        vector<Future<Void>> writeFutures;
        std::vector<Future<Void>> writeFutures;
        for (int writeOffset = 0; writeOffset < length;) {
            // Number of bytes until the next diskPageLength file offset within the write or the end of the write.
            int pageLength = diskPageLength;
@@ -27,8 +27,6 @@
#include <unordered_map>
#include <unordered_set>

using std::vector;

/*

IFailureMonitor is used by load balancing, data distribution and other components
@@ -32,8 +32,6 @@

void forceLinkFlowTests() {}

using std::vector;

constexpr int firstLine = __LINE__;
TEST_CASE("/flow/actorcompiler/lineNumbers") {
    loop {

@@ -438,9 +436,9 @@ TEST_CASE("/flow/flow/networked futures") {
}

TEST_CASE("/flow/flow/quorum") {
    vector<Promise<int>> ps(5);
    vector<Future<int>> fs;
    vector<Future<Void>> qs;
    std::vector<Promise<int>> ps(5);
    std::vector<Future<int>> fs;
    std::vector<Future<Void>> qs;
    for (auto& p : ps)
        fs.push_back(p.getFuture());

@@ -774,7 +772,7 @@ TEST_CASE("/flow/perf/yieldedFuture") {

    Promise<Void> p;
    Future<Void> f = p.getFuture();
    vector<Future<Void>> ys;
    std::vector<Future<Void>> ys;

    start = timer();
    for (int i = 0; i < N; i++)

@@ -879,8 +877,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            out[i] = oneWaitActor(pipe[i].getFuture());

@@ -893,8 +891,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            out[i] = oneWaitActor(pipe[i].getFuture());

@@ -955,8 +953,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            out[i] = chooseTwoActor(pipe[i].getFuture(), never);

@@ -969,8 +967,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            out[i] = chooseTwoActor(pipe[i].getFuture(), pipe[i].getFuture());

@@ -983,8 +981,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            out[i] = chooseTwoActor(chooseTwoActor(pipe[i].getFuture(), never), never);

@@ -1008,8 +1006,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            out[i] = oneWaitActor(chooseTwoActor(pipe[i].getFuture(), never));

@@ -1035,9 +1033,9 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out1(N);
        vector<Future<Void>> out2(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out1(N);
        std::vector<Future<Void>> out2(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            Future<Void> f = chooseTwoActor(pipe[i].getFuture(), never);

@@ -1052,9 +1050,9 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out1(N);
        vector<Future<Void>> out2(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out1(N);
        std::vector<Future<Void>> out2(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            Future<Void> f = chooseTwoActor(oneWaitActor(pipe[i].getFuture()), never);

@@ -1069,9 +1067,9 @@ TEST_CASE("/flow/flow/perf/actor patterns") {
    }

    {
        vector<Promise<Void>> pipe(N);
        vector<Future<Void>> out1(N);
        vector<Future<Void>> out2(N);
        std::vector<Promise<Void>> pipe(N);
        std::vector<Future<Void>> out1(N);
        std::vector<Future<Void>> out2(N);
        start = timer();
        for (int i = 0; i < N; i++) {
            g_cheese = pipe[i].getFuture();

@@ -1101,8 +1099,8 @@ TEST_CASE("/flow/flow/perf/actor patterns") {

    {
        start = timer();
        vector<Promise<Void>> ps(3);
        vector<Future<Void>> fs(3);
        std::vector<Promise<Void>> ps(3);
        std::vector<Future<Void>> fs(3);

        for (int i = 0; i < N; i++) {
            ps.clear();
@@ -45,13 +45,9 @@

static NetworkAddressList g_currentDeliveryPeerAddress = NetworkAddressList();

constexpr UID WLTOKEN_ENDPOINT_NOT_FOUND(-1, 0);
constexpr UID WLTOKEN_PING_PACKET(-1, 1);
constexpr int PACKET_LEN_WIDTH = sizeof(uint32_t);
const uint64_t TOKEN_STREAM_FLAG = 1;

static constexpr int WLTOKEN_COUNTS = 22; // number of wellKnownEndpoints

class EndpointMap : NonCopyable {
public:
    // Reserve space for this many wellKnownEndpoints

@@ -97,7 +93,7 @@ void EndpointMap::realloc() {

void EndpointMap::insertWellKnown(NetworkMessageReceiver* r, const Endpoint::Token& token, TaskPriority priority) {
    int index = token.second();
    ASSERT(index <= WLTOKEN_COUNTS);
    ASSERT(index <= wellKnownEndpointCount);
    ASSERT(data[index].receiver == nullptr);
    data[index].receiver = r;
    data[index].token() =

@@ -196,7 +192,8 @@ void EndpointMap::remove(Endpoint::Token const& token, NetworkMessageReceiver* r

struct EndpointNotFoundReceiver final : NetworkMessageReceiver {
    EndpointNotFoundReceiver(EndpointMap& endpoints) {
        endpoints.insertWellKnown(this, WLTOKEN_ENDPOINT_NOT_FOUND, TaskPriority::DefaultEndpoint);
        endpoints.insertWellKnown(
            this, Endpoint::wellKnownToken(WLTOKEN_ENDPOINT_NOT_FOUND), TaskPriority::DefaultEndpoint);
    }

    void receive(ArenaObjectReader& reader) override {

@@ -220,7 +217,7 @@ struct PingRequest {

struct PingReceiver final : NetworkMessageReceiver {
    PingReceiver(EndpointMap& endpoints) {
        endpoints.insertWellKnown(this, WLTOKEN_PING_PACKET, TaskPriority::ReadSocket);
        endpoints.insertWellKnown(this, Endpoint::wellKnownToken(WLTOKEN_PING_PACKET), TaskPriority::ReadSocket);
    }
    void receive(ArenaObjectReader& reader) override {
        PingRequest req;

@@ -234,7 +231,7 @@ struct PingReceiver final : NetworkMessageReceiver {

class TransportData {
public:
    TransportData(uint64_t transportId);
    TransportData(uint64_t transportId, int maxWellKnownEndpoints);

    ~TransportData();

@@ -341,8 +338,8 @@ ACTOR Future<Void> pingLatencyLogger(TransportData* self) {
    }
}

TransportData::TransportData(uint64_t transportId)
  : warnAlwaysForLargePacket(true), endpoints(WLTOKEN_COUNTS), endpointNotFoundReceiver(endpoints),
TransportData::TransportData(uint64_t transportId, int maxWellKnownEndpoints)
  : warnAlwaysForLargePacket(true), endpoints(maxWellKnownEndpoints), endpointNotFoundReceiver(endpoints),
    pingReceiver(endpoints), numIncompatibleConnections(0), lastIncompatibleMessage(0), transportId(transportId) {
    degraded = makeReference<AsyncVar<bool>>(false);
    pingLogger = pingLatencyLogger(this);

@@ -430,7 +427,7 @@ static ReliablePacket* sendPacket(TransportData* self,
                                  bool reliable);

ACTOR Future<Void> connectionMonitor(Reference<Peer> peer) {
    state Endpoint remotePingEndpoint({ peer->destination }, WLTOKEN_PING_PACKET);
    state Endpoint remotePingEndpoint({ peer->destination }, Endpoint::wellKnownToken(WLTOKEN_PING_PACKET));
    loop {
        if (!FlowTransport::isClient() && !peer->destination.isPublic() && peer->compatible) {
            // Don't send ping messages to clients unless necessary. Instead monitor incoming client pings.

@@ -961,13 +958,13 @@ ACTOR static void deliver(TransportData* self,
        if (self->isLocalAddress(destination.getPrimaryAddress())) {
            sendLocal(self,
                      SerializeSource<UID>(destination.token),
                      Endpoint(destination.addresses, WLTOKEN_ENDPOINT_NOT_FOUND));
                      Endpoint::wellKnown(destination.addresses, WLTOKEN_ENDPOINT_NOT_FOUND));
        } else {
            Reference<Peer> peer = self->getOrOpenPeer(destination.getPrimaryAddress());
            sendPacket(self,
                       peer,
                       SerializeSource<UID>(destination.token),
                       Endpoint(destination.addresses, WLTOKEN_ENDPOINT_NOT_FOUND),
                       Endpoint::wellKnown(destination.addresses, WLTOKEN_ENDPOINT_NOT_FOUND),
                       false);
        }
    }

@@ -1421,7 +1418,8 @@ ACTOR static Future<Void> multiVersionCleanupWorker(TransportData* self) {
    }
}

FlowTransport::FlowTransport(uint64_t transportId) : self(new TransportData(transportId)) {
FlowTransport::FlowTransport(uint64_t transportId, int maxWellKnownEndpoints)
  : self(new TransportData(transportId, maxWellKnownEndpoints)) {
    self->multiVersionCleanup = multiVersionCleanupWorker(self);
}

@@ -1566,7 +1564,8 @@ static ReliablePacket* sendPacket(TransportData* self,

    // If there isn't an open connection, a public address, or the peer isn't compatible, we can't send
    if (!peer || (peer->outgoingConnectionIdle && !destination.getPrimaryAddress().isPublic()) ||
        (peer->incompatibleProtocolVersionNewer && destination.token != WLTOKEN_PING_PACKET)) {
        (peer->incompatibleProtocolVersionNewer &&
         destination.token != Endpoint::wellKnownToken(WLTOKEN_PING_PACKET))) {
        TEST(true); // Can't send to private address without a compatible open connection
        return nullptr;
    }

@@ -1651,7 +1650,7 @@ static ReliablePacket* sendPacket(TransportData* self,
#endif

    peer->send(pb, rp, firstUnsent);
    if (destination.token != WLTOKEN_PING_PACKET) {
    if (destination.token != Endpoint::wellKnownToken(WLTOKEN_PING_PACKET)) {
        peer->lastDataPacketSentTime = now();
    }
    return rp;

@@ -1716,8 +1715,9 @@ bool FlowTransport::incompatibleOutgoingConnectionsPresent() {
    return self->numIncompatibleConnections > 0;
}

void FlowTransport::createInstance(bool isClient, uint64_t transportId) {
    g_network->setGlobal(INetwork::enFlowTransport, (flowGlobalType) new FlowTransport(transportId));
void FlowTransport::createInstance(bool isClient, uint64_t transportId, int maxWellKnownEndpoints) {
    g_network->setGlobal(INetwork::enFlowTransport,
                         (flowGlobalType) new FlowTransport(transportId, maxWellKnownEndpoints));
    g_network->setGlobal(INetwork::enNetworkAddressFunc, (flowGlobalType)&FlowTransport::getGlobalLocalAddress);
    g_network->setGlobal(INetwork::enNetworkAddressesFunc, (flowGlobalType)&FlowTransport::getGlobalLocalAddresses);
    g_network->setGlobal(INetwork::enFailureMonitor, (flowGlobalType) new SimpleFailureMonitor());
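Taken together, these hunks replace the hard-coded WLTOKEN_COUNTS constant with a caller-supplied maximum, threaded from createInstance through FlowTransport and TransportData into EndpointMap. A standalone sketch of the reservation logic (stand-in types only; the real map also stores task priorities and uses flow's ASSERT):

#include <cassert>
#include <vector>

struct NetworkMessageReceiver {};

class EndpointMap {
    std::vector<NetworkMessageReceiver*> data;
    int wellKnownEndpointCount;

public:
    // The well-known block is sized by the caller instead of a file-local constant.
    explicit EndpointMap(int maxWellKnownEndpoints)
      : data(maxWellKnownEndpoints, nullptr), wellKnownEndpointCount(maxWellKnownEndpoints) {}

    void insertWellKnown(NetworkMessageReceiver* r, int index) {
        assert(index < wellKnownEndpointCount); // index is derived from the token's second part
        assert(data[index] == nullptr);         // each well-known slot may be claimed only once
        data[index] = r;
    }
};

int main() {
    EndpointMap endpoints(22); // e.g. WLTOKEN_RESERVED_COUNT from WellKnownEndpoints.h
    NetworkMessageReceiver ping;
    endpoints.insertWellKnown(&ping, 1); // WLTOKEN_PING_PACKET
}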
@@ -31,6 +31,8 @@
#include "flow/Net2Packet.h"
#include "fdbrpc/ContinuousSample.h"

enum { WLTOKEN_ENDPOINT_NOT_FOUND = 0, WLTOKEN_PING_PACKET, WLTOKEN_FIRST_AVAILABLE };

#pragma pack(push, 4)
class Endpoint {
public:

@@ -46,6 +48,12 @@ public:
        choosePrimaryAddress();
    }

    static Token wellKnownToken(int wlTokenID) { return UID(-1, wlTokenID); }

    static Endpoint wellKnown(const NetworkAddressList& addresses, int wlTokenID) {
        return Endpoint(addresses, wellKnownToken(wlTokenID));
    }

    void choosePrimaryAddress() {
        if (addresses.secondaryAddress.present() &&
            ((!g_network->getLocalAddresses().secondaryAddress.present() &&
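With these helpers, call sites compare a destination token against Endpoint::wellKnownToken(id) rather than against file-local constexpr UID constants, as the sendPacket hunks above show. A minimal standalone model of that comparison (stand-in Token type):

#include <cstdint>
#include <iostream>

struct Token {
    int64_t first;
    uint64_t second;
    bool operator!=(const Token& o) const { return first != o.first || second != o.second; }
};

enum { WLTOKEN_ENDPOINT_NOT_FOUND = 0, WLTOKEN_PING_PACKET, WLTOKEN_FIRST_AVAILABLE };

// Stand-in for Endpoint::wellKnownToken(int).
Token wellKnownToken(int wlTokenID) {
    return Token{ -1, static_cast<uint64_t>(wlTokenID) };
}

int main() {
    Token destination = wellKnownToken(WLTOKEN_PING_PACKET);
    // Pings are excluded from lastDataPacketSentTime bookkeeping in sendPacket:
    if (destination != wellKnownToken(WLTOKEN_PING_PACKET))
        std::cout << "data packet: update lastDataPacketSentTime\n";
    else
        std::cout << "ping packet: skip\n";
}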
@@ -175,12 +183,12 @@ struct Peer : public ReferenceCounted<Peer> {

class FlowTransport {
public:
    FlowTransport(uint64_t transportId);
    FlowTransport(uint64_t transportId, int maxWellKnownEndpoints);
    ~FlowTransport();

    // Creates a new FlowTransport and makes FlowTransport::transport() return it. This uses g_network->global()
    // variables, so it will be private to a simulation.
    static void createInstance(bool isClient, uint64_t transportId);
    static void createInstance(bool isClient, uint64_t transportId, int maxWellKnownEndpoints);

    static bool isClient() { return g_network->global(INetwork::enClientFailureMonitor) != nullptr; }
@@ -41,8 +41,6 @@
#include "fdbrpc/TSSComparison.h"
#include "flow/actorcompiler.h" // This must be the last #include.

using std::vector;

ACTOR Future<Void> allAlternativesFailedDelay(Future<Void> okFuture);

struct ModelHolder : NonCopyable, public ReferenceCounted<ModelHolder> {

@@ -609,7 +607,7 @@ Future<REPLY_TYPE(Request)> loadBalance(
    if (!stream && !firstRequestData.isValid()) {
        // Everything is down! Wait for someone to be up.

        vector<Future<Void>> ok(alternatives->size());
        std::vector<Future<Void>> ok(alternatives->size());
        for (int i = 0; i < ok.size(); i++) {
            ok[i] = IFailureMonitor::failureMonitor().onStateEqual(alternatives->get(i, channel).getEndpoint(),
                                                                   FailureStatus(false));

@@ -769,7 +767,7 @@ Future<REPLY_TYPE(Request)> basicLoadBalance(Reference<ModelInterface<Multi>> al
    if (!stream) {
        // Everything is down! Wait for someone to be up.

        vector<Future<Void>> ok(alternatives->size());
        std::vector<Future<Void>> ok(alternatives->size());
        for (int i = 0; i < ok.size(); i++) {
            ok[i] = IFailureMonitor::failureMonitor().onStateEqual(alternatives->get(i, channel).getEndpoint(),
                                                                   FailureStatus(false));
@@ -97,7 +97,7 @@ public:
    // If balanceOnRequests is true, the client will load balance based on the number of GRVs released by each proxy
    // If balanceOnRequests is false, the client will load balance based on the CPU usage of each proxy
    // Only requests which take from the GRV budget on the proxy should set balanceOnRequests to true
    ModelInterface(const vector<T>& v, bool balanceOnRequests) : balanceOnRequests(balanceOnRequests) {
    ModelInterface(const std::vector<T>& v, bool balanceOnRequests) : balanceOnRequests(balanceOnRequests) {
        for (int i = 0; i < v.size(); i++) {
            alternatives.push_back(AlternativeInfo(v[i], 1.0 / v.size(), (i + 1.0) / v.size()));
        }

@@ -174,14 +174,14 @@ public:
    std::string description() { return describe(alternatives); }

private:
    vector<AlternativeInfo<T>> alternatives;
    std::vector<AlternativeInfo<T>> alternatives;
    Future<Void> updater;
    bool balanceOnRequests;
};

template <class T>
class MultiInterface : public ReferenceCounted<MultiInterface<T>> {
    MultiInterface(const vector<T>& v, LocalityData const& locality = LocalityData()) {
    MultiInterface(const std::vector<T>& v, LocalityData const& locality = LocalityData()) {
        // This version of MultInterface is no longer used, but was kept around because of templating
        ASSERT(false);
    }
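The constructor above seeds every alternative with probability 1/n and a cumulative bound of (i + 1.0) / n; a request then picks the first alternative whose cumulative probability covers a uniform random draw. A standalone sketch of that selection, under the assumption that an updater later skews the probabilities toward less-loaded proxies:

#include <iostream>
#include <vector>

struct AlternativeInfo {
    int id;
    double probability;
    double cumulativeProbability;
};

// Pick the first alternative whose cumulative probability covers the draw r in [0, 1).
int pick(const std::vector<AlternativeInfo>& alternatives, double r) {
    for (const auto& a : alternatives)
        if (r <= a.cumulativeProbability)
            return a.id;
    return alternatives.back().id;
}

int main() {
    int n = 4;
    std::vector<AlternativeInfo> alts;
    for (int i = 0; i < n; i++)
        alts.push_back({ i, 1.0 / n, (i + 1.0) / n }); // mirrors (i + 1.0) / v.size()
    std::cout << pick(alts, 0.6) << "\n"; // prints: 2
}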
@@ -27,8 +27,6 @@
#include "flow/BooleanParam.h"
#include "flow/flow.h"

using std::vector;

FDB_DECLARE_BOOLEAN_PARAM(Averaged);

struct PerfMetric {

@@ -62,7 +60,7 @@ private:

struct PerfIntCounter {
    PerfIntCounter(std::string name) : name(name), value(0) {}
    PerfIntCounter(std::string name, vector<PerfIntCounter*>& v) : name(name), value(0) { v.push_back(this); }
    PerfIntCounter(std::string name, std::vector<PerfIntCounter*>& v) : name(name), value(0) { v.push_back(this); }
    void operator+=(int64_t delta) { value += delta; }
    void operator++() { value += 1; }
    PerfMetric getMetric() const { return PerfMetric(name, static_cast<double>(value), Averaged::False, "%.0lf"); }

@@ -76,7 +74,9 @@ private:

struct PerfDoubleCounter {
    PerfDoubleCounter(std::string name) : name(name), value(0) {}
    PerfDoubleCounter(std::string name, vector<PerfDoubleCounter*>& v) : name(name), value(0) { v.push_back(this); }
    PerfDoubleCounter(std::string name, std::vector<PerfDoubleCounter*>& v) : name(name), value(0) {
        v.push_back(this);
    }
    void operator+=(double delta) { value += delta; }
    void operator++() { value += 1.0; }
    PerfMetric getMetric() const { return PerfMetric(name, value, Averaged::False); }
@@ -28,16 +28,11 @@
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

using std::cout;
using std::endl;

using std::vector;

void* allocateLargePages(int total);

bool testFuzzActor(Future<int> (*actor)(FutureStream<int> const&, PromiseStream<int> const&, Future<Void> const&),
                   const char* desc,
                   vector<int> const& expectedOutput) {
                   std::vector<int> const& expectedOutput) {
    // Run the test 5 times with different "timing"
    int i, outCount;
    bool ok = true;

@@ -130,21 +125,21 @@ void memoryTest2() {
    for(int threads=1; threads<=MaxThreads; threads++) {
        double tstart = timer();

        vector<ThreadFuture<Void>> done;
        std::vector<ThreadFuture<Void>> done;
        for(int t=0; t<threads; t++) {
            char** r = random + Reads*t;
            done.push_back(
                inThread<Void>( [r,Reads] () -> Void {
                    for(int i=0; i<Reads; i++)
                        if ( *r[i] )
                            cout << "Does not happen" << endl;
                            std::cout << "Does not happen" << std::endl;
                    return Void();
                }));
        }
        waitForAll(done).getBlocking();
        double duration = timer() - tstart;

        cout << format("%d threads: %f sec, %0.2fM/sec", threads, duration, Reads*threads/1e6/duration) << endl;
        std::cout << format("%d threads: %f sec, %0.2fM/sec", threads, duration, Reads*threads/1e6/duration) << std::endl;
    }
}
#endif
@@ -163,17 +158,17 @@ void memoryTest() {

    const int N = 128<<20; // 128 = 1GB
    const int N2 = 8<<20;
    cout << "Preparing memory test with " << N / 1e6 * sizeof(void*) << " MB" << endl;
    std::cout << "Preparing memory test with " << N / 1e6 * sizeof(void*) << " MB" << std::endl;
    void **x;
    if (0) {
        cout << " NUMA large pages" << endl;
        std::cout << " NUMA large pages" << std::endl;
        x = (void**)numaAllocate(size_t(N)*sizeof(void*));
    } else if (1) {
        cout << " Normal pages" << endl;
        std::cout << " Normal pages" << std::endl;
        x = new void*[ N ];
        printf(" at %p\n", x);
    } else {
        cout << " Large pages" << endl;
        std::cout << " Large pages" << std::endl;
        x = (void**)allocate(N*sizeof(void*), true);
    }
    memset(x, 0, ((int64_t)N) * sizeof(void*));

@@ -181,7 +176,7 @@ void memoryTest() {
    showNumaStatus();

    if (1) {
        cout <<" Random permutation" << endl;
        std::cout <<" Random permutation" << std::endl;
        // Random cyclic permutation
        for(int i=0; i<N; i++)
            x[i] = &x[i];

@@ -191,7 +186,7 @@ void memoryTest() {
            std::swap( x[k], x[n] );
        }
    } else {
        cout <<" Sequential permutation" << endl;
        std::cout <<" Sequential permutation" << std::endl;
        // Sequential
        for(int i=0; i<N-1; i++)
            x[i] = &x[i+1];

@@ -201,7 +196,7 @@ void memoryTest() {
    for(int i=0; i<N; i++) {
        p = (void**)*p;
        if (p == x) {
            cout << "Cycle " << i << endl;
            std::cout << "Cycle " << i << std::endl;
            if (i != N-1) terminate();
        }
    }

@@ -217,7 +212,7 @@ void memoryTest() {
    }
    for(int T=1; T<=MT; T+=T) {
        double start = timer();
        vector< Future<double> > done;
        std::vector< Future<double> > done;
        for(int t=0; t<T; t++) {
            void*** start = starts + t*TraversalsPerThread;
            done.push_back(

@@ -233,7 +228,7 @@ void memoryTest() {
            }
            for(int j=0; j<TraversalsPerThread; j++)
                if (p[j] == p[(j+1)%TraversalsPerThread])
                    cout << "N";
                    std::cout << "N";
            return timer();
        }));
    }
@@ -262,14 +257,14 @@ ACTOR template <int N, class X>
ACTOR template <class A, class B>
[[flow_allow_discard]] Future<Void> switchTest(FutureStream<A> as, Future<B> oneb) {
    loop choose {
        when(A a = waitNext(as)) { cout << "A " << a << endl; }
        when(A a = waitNext(as)) { std::cout << "A " << a << std::endl; }
        when(B b = wait(oneb)) {
            cout << "B " << b << endl;
            std::cout << "B " << b << std::endl;
            break;
        }
    }
    loop {
        cout << "Done!" << endl;
        std::cout << "Done!" << std::endl;
        return Void();
    }
}

@@ -287,7 +282,7 @@ public:
    }
#if !defined(__INTEL_COMPILER)
    void operator delete(void* buf) {
        cout << "Freeing buffer" << endl;
        std::cout << "Freeing buffer" << std::endl;
        delete[](int*) buf;
    }
#endif

@@ -344,12 +339,12 @@ void fastAllocTest() {
    }
    std::sort(d.begin(), d.end());
    if (std::unique(d.begin(), d.end()) != d.end())
        cout << "Pointer returned twice!?" << endl;
        std::cout << "Pointer returned twice!?" << std::endl;

    for (int i = 0; i < 2; i++) {
        void* p = FastAllocator<64>::allocate();
        void* q = FastAllocator<64>::allocate();
        cout << (intptr_t)p << " " << (intptr_t)q << endl;
        std::cout << (intptr_t)p << " " << (intptr_t)q << std::endl;
        FastAllocator<64>::release(p);
        FastAllocator<64>::release(q);
    }

@@ -358,13 +353,13 @@ void fastAllocTest() {
    for (int i = 0; i < 1000000; i++)
        (void)FastAllocator<64>::allocate();
    t = timer() - t;
    cout << "Allocations: " << (1 / t) << "M/sec" << endl;
    std::cout << "Allocations: " << (1 / t) << "M/sec" << std::endl;

    t = timer();
    for (int i = 0; i < 1000000; i++)
        FastAllocator<64>::release(FastAllocator<64>::allocate());
    t = timer() - t;
    cout << "Allocate/Release pairs: " << (1 / t) << "M/sec" << endl;
    std::cout << "Allocate/Release pairs: " << (1 / t) << "M/sec" << std::endl;

    t = timer();
    void* pp[100];

@@ -375,13 +370,13 @@ void fastAllocTest() {
            FastAllocator<64>::release(pp[j]);
    }
    t = timer() - t;
    cout << "Allocate/Release interleaved(100): " << (1 / t) << "M/sec" << endl;
    std::cout << "Allocate/Release interleaved(100): " << (1 / t) << "M/sec" << std::endl;

    t = timer();
    for (int i = 0; i < 1000000; i++)
        delete new TestB;
    t = timer() - t;
    cout << "Allocate/Release TestB pairs: " << (1 / t) << "M/sec" << endl;
    std::cout << "Allocate/Release TestB pairs: " << (1 / t) << "M/sec" << std::endl;

#if FLOW_THREAD_SAFE
    t = timer();

@@ -399,8 +394,8 @@ void fastAllocTest() {
        }));
    waitForAll(results).getBlocking();
    t = timer() - t;
    cout << "Threaded Allocate/Release TestB interleaved (100): " << results.size() << " x " << (1 / t) << "M/sec"
         << endl;
    std::cout << "Threaded Allocate/Release TestB interleaved (100): " << results.size() << " x " << (1 / t) << "M/sec"
              << std::endl;
#endif

    volatile int32_t v = 0;
@@ -409,7 +404,7 @@ void fastAllocTest() {
    for (int i = 0; i < 10000000; i++)
        interlockedIncrement(&v);
    t = timer() - t;
    cout << "interlocked increment: " << 10.0 / t << "M/sec " << v << endl;
    std::cout << "interlocked increment: " << 10.0 / t << "M/sec " << v << std::endl;

    v = 5;
    t = timer();

@@ -417,14 +412,14 @@ void fastAllocTest() {
        interlockedCompareExchange(&v, 5, 5);
    }
    t = timer() - t;
    cout << "1 state machine: " << 10.0 / t << "M/sec " << v << endl;
    std::cout << "1 state machine: " << 10.0 / t << "M/sec " << v << std::endl;

    v = 0;
    t = timer();
    for (int i = 0; i < 10000000; i++)
        v++;
    t = timer() - t;
    cout << "volatile increment: " << 10.0 / t << "M/sec " << v << endl;
    std::cout << "volatile increment: " << 10.0 / t << "M/sec " << v << std::endl;

    {
        Reference<TestBuffer> b(TestBuffer::create(1000));

@@ -436,14 +431,14 @@ void fastAllocTest() {
        b = std::move(r);
    }
    t = timer() - t;
    cout << "move Reference<Buffer>: " << 10.0 / t << "M/sec " << endl;
    std::cout << "move Reference<Buffer>: " << 10.0 / t << "M/sec " << std::endl;

    t = timer();
    for (int i = 0; i < 10000000; i++) {
        Reference<TestBuffer> r = b;
    }
    t = timer() - t;
    cout << "copy (1) Reference<Buffer>: " << 10.0 / t << "M/sec " << endl;
    std::cout << "copy (1) Reference<Buffer>: " << 10.0 / t << "M/sec " << std::endl;

    Reference<TestBuffer> c = b;
    t = timer();

@@ -451,27 +446,27 @@ void fastAllocTest() {
        Reference<TestBuffer> r = b;
    }
    t = timer() - t;
    cout << "copy (2) Reference<Buffer>: " << 10.0 / t << "M/sec " << endl;
    std::cout << "copy (2) Reference<Buffer>: " << 10.0 / t << "M/sec " << std::endl;

    cout << (const char*)b->begin() << endl;
    std::cout << (const char*)b->begin() << std::endl;
    }
    t = timer();
    for (int i = 0; i < 10000000; i++) {
        delete new FastKey;
    }
    t = timer() - t;
    cout << "delete new FastKey: " << 10.0 / t << "M/sec " << fastKeyCount << endl;
    std::cout << "delete new FastKey: " << 10.0 / t << "M/sec " << fastKeyCount << std::endl;

    t = timer();
    for (int i = 0; i < 10000000; i++) {
        Reference<FastKey> r(new FastKey);
    }
    t = timer() - t;
    cout << "new Reference<FastKey>: " << 10.0 / t << "M/sec " << fastKeyCount << endl;
    std::cout << "new Reference<FastKey>: " << 10.0 / t << "M/sec " << fastKeyCount << std::endl;
}

template <class PromiseT>
Future<Void> threadSafetySender(vector<PromiseT>& v, Event& start, Event& ready, int iterations) {
Future<Void> threadSafetySender(std::vector<PromiseT>& v, Event& start, Event& ready, int iterations) {
    for (int i = 0; i < iterations; i++) {
        start.block();
        if (v.size() == 0)
@@ -500,7 +495,7 @@ void threadSafetyTest() {

    int N = 10000, V = 100;

    vector<Promise<Void>> v;
    std::vector<Promise<Void>> v;
    Event start, ready;
    Future<Void> sender = inThread<Void>( [&] { return threadSafetySender( v, start, ready, N ); } );

@@ -508,7 +503,7 @@ void threadSafetyTest() {
        v.clear();
        for (int j = 0; j < V; j++)
            v.push_back(Promise<Void>());
        vector<Future<Void>> f( v.size() );
        std::vector<Future<Void>> f( v.size() );
        for(int i=0; i<v.size(); i++)
            f[i] = v[i].getFuture();
        std::random_shuffle( f.begin(), f.end() );

@@ -520,11 +515,11 @@ void threadSafetyTest() {
        ready.block();

        if (count != V)
            cout << "Thread safety error: " << count << endl;
            std::cout << "Thread safety error: " << count << std::endl;
    }

    t = timer()-t;
    cout << "Thread safety test (2t): " << (V*N/1e6/t) << "M/sec" << endl;
    std::cout << "Thread safety test (2t): " << (V*N/1e6/t) << "M/sec" << std::endl;
}

void threadSafetyTest2() {

@@ -532,16 +527,16 @@ void threadSafetyTest2() {

    int N = 1000, V = 100;

    // vector<PromiseStream<Void>> streams( 100 );
    vector<PromiseStream<Void>> streams;
    // std::vector<PromiseStream<Void>> streams( 100 );
    std::vector<PromiseStream<Void>> streams;
    for (int i = 0; i < 100; i++)
        streams.push_back(PromiseStream<Void>());
    vector<PromiseStream<Void>> v;
    std::vector<PromiseStream<Void>> v;
    Event start, ready;
    Future<Void> sender = inThread<Void>( [&] { return threadSafetySender( v, start, ready, N ); } );

    for(int i=0; i<N; i++) {
        vector<int> counts( streams.size() );
        std::vector<int> counts( streams.size() );
        v.clear();
        for(int k=0; k<V; k++) {
            int i = deterministicRandom()->randomInt(0, (int)streams.size());

@@ -556,11 +551,11 @@ void threadSafetyTest2() {
        ready.block();

        if (count != V)
            cout << "Thread safety error: " << count << endl;
            std::cout << "Thread safety error: " << count << std::endl;
    }

    t = timer()-t;
    cout << "Thread safety test 2 (2t): " << (V*N/1e6/t) << "M/sec" << endl;
    std::cout << "Thread safety test 2 (2t): " << (V*N/1e6/t) << "M/sec" << std::endl;
}

volatile int32_t cancelled = 0, returned = 0;
@@ -579,8 +574,8 @@ ACTOR [[flow_allow_discard]] Future<Void> returnCancelRacer( Future<Void> f ) {
void returnCancelRaceTest() {
    int N = 100, M = 100;
    for(int i=0; i<N; i++) {
        vector< Promise<Void> > promises;
        vector< Future<Void> > futures;
        std::vector< Promise<Void> > promises;
        std::vector< Future<Void> > futures;
        for(int i=0; i < M; i++) {
            promises.push_back( Promise<Void>() );
            futures.push_back( returnCancelRacer( promises.back().getFuture() ) );

@@ -654,8 +649,8 @@ void arenaTest() {

    for (auto i = test.begin(); i != test.end(); ++i)
        for (auto j = i->begin(); j != i->end(); ++j)
            cout << *j;
    cout << endl;
            std::cout << *j;
    std::cout << std::endl;

    wr << test;
}

@@ -667,8 +662,8 @@ void arenaTest() {

    for (auto i = test2.begin(); i != test2.end(); ++i)
        for (auto j = i->begin(); j != i->end(); ++j)
            cout << *j;
    cout << endl;
            std::cout << *j;
    std::cout << std::endl;
    }

    double t = timer();

@@ -686,7 +681,7 @@ void arenaTest() {
ACTOR [[flow_allow_discard]] void testStream(FutureStream<int> xs) {
    loop {
        int x = waitNext(xs);
        cout << x << endl;
        std::cout << x << std::endl;
    }
}

@@ -947,10 +942,8 @@ ACTOR [[flow_allow_discard]] Future<Void> cancellable2() {
    }
}

using std::string;

ACTOR [[flow_allow_discard]] Future<int> introLoadValueFromDisk(Future<string> filename) {
    string file = wait(filename);
ACTOR [[flow_allow_discard]] Future<int> introLoadValueFromDisk(Future<std::string> filename) {
    std::string file = wait(filename);

    if (file == "/dev/threes")
        return 3;
@@ -1075,7 +1068,7 @@ ACTOR [[flow_allow_discard]] void cycle(FutureStream<Void> in, PromiseStream<Voi
}

ACTOR [[flow_allow_discard]] Future<Void> cycleTime(int nodes, int times) {
    state vector<PromiseStream<Void>> n(nodes);
    state std::vector<PromiseStream<Void>> n(nodes);
    state int total = 0;

    // 1->2, 2->3, ..., n-1->0

@@ -1361,14 +1354,14 @@ void dsltest() {
    Future<int> c = chooseTest(a.getFuture(), b.getFuture());
    a.send(1);
    b.send(2);
    cout << "c=" << c.get() << endl;
    std::cout << "c=" << c.get() << std::endl;
    }

    {
    Promise<double> i;
    Future<double> d = addN<20>(i.getFuture());
    i.send(1.1);
    cout << d.get() << endl;
    std::cout << d.get() << std::endl;
    }

    {

@@ -1376,9 +1369,9 @@ void dsltest() {
    i.sendError(operation_failed());
    Future<double> d = addN<20>(i.getFuture());
    if (d.isError() && d.getError().code() == error_code_operation_failed)
        cout << "Error transmitted OK" << endl;
        std::cout << "Error transmitted OK" << std::endl;
    else
        cout << "Error not transmitted!" << endl;
        std::cout << "Error not transmitted!" << std::endl;
    }

    /*{

@@ -1386,10 +1379,10 @@ void dsltest() {
    PromiseStream<int> t;
    testStream(t.getFuture());
    if (Actor::allActors.size() != na+1)
        cout << "Actor not created!" << endl;
        std::cout << "Actor not created!" << std::endl;
    t = PromiseStream<int>();
    if (Actor::allActors.size() != na)
        cout << "Actor not cleaned up!" << endl;
        std::cout << "Actor not cleaned up!" << std::endl;
    }*/

    PromiseStream<int> as;

@@ -1439,7 +1432,7 @@ void pingtest() {
    Future<Void> pS = pingServer( serverInterface.getFuture(), 5000000 );
    Future<int> count = ping( serverInterface );
    double end = timer();
    cout << count.get() << " pings completed in " << (end-start) << " sec" << endl;
    std::cout << count.get() << " pings completed in " << (end-start) << " sec" << std::endl;
}*/

void copyTest() {

@@ -1495,12 +1488,12 @@ void copyTest() {

    loop choose {
        when( int j = waitNext( js.getFuture() ) ) {
            cout << "J" << j << endl;
            std::cout << "J" << j << std::endl;
        }
        when( int i = waitNext( is ) ) {
            cout << "I" << i << endl;
            std::cout << "I" << i << std::endl;
            js.send( i );
            cout << "-I" << i << endl;
            std::cout << "-I" << i << std::endl;
        }
    }
}
@@ -255,6 +255,7 @@ void setReplyPriority(const ReplyPromise<Reply>& p, TaskPriority taskID) {

struct ReplyPromiseStreamReply {
    Optional<UID> acknowledgeToken;
    uint16_t sequence;
    ReplyPromiseStreamReply() {}
};

@@ -277,15 +278,15 @@ struct AcknowledgementReceiver final : FlowReceiver, FastAllocated<Acknowledgeme
    using FastAllocated<AcknowledgementReceiver>::operator new;
    using FastAllocated<AcknowledgementReceiver>::operator delete;

    int64_t bytesSent;
    int64_t bytesAcknowledged;
    int64_t bytesLimit;
    uint16_t sequence = 0;
    int64_t bytesSent = 0;
    int64_t bytesAcknowledged = 0;
    int64_t bytesLimit = 0;
    Promise<Void> ready;
    Future<Void> failures;

    AcknowledgementReceiver() : bytesSent(0), bytesAcknowledged(0), bytesLimit(0), ready(nullptr) {}
    AcknowledgementReceiver(const Endpoint& remoteEndpoint)
      : FlowReceiver(remoteEndpoint, false), bytesSent(0), bytesAcknowledged(0), bytesLimit(0), ready(nullptr) {}
    AcknowledgementReceiver() : ready(nullptr) {}
    AcknowledgementReceiver(const Endpoint& remoteEndpoint) : FlowReceiver(remoteEndpoint, false), ready(nullptr) {}

    void receive(ArenaObjectReader& reader) override {
        ErrorOr<AcknowledgementReply> message;

@@ -353,20 +354,29 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
            acknowledgements = AcknowledgementReceiver(
                FlowTransport::transport().loadedEndpoint(message.get().asUnderlyingType().acknowledgeToken.get()));
        }
        if (this->shouldFireImmediately()) {
            // This message is going to be consumed by the client immediately (and therefore will not call pop()) so
            // send an ack immediately
            if (acknowledgements.getRawEndpoint().isValid()) {
                acknowledgements.bytesAcknowledged += message.get().asUnderlyingType().expectedSize();
                FlowTransport::transport().sendUnreliable(
                    SerializeSource<ErrorOr<AcknowledgementReply>>(
                        AcknowledgementReply(acknowledgements.bytesAcknowledged)),
                    acknowledgements.getEndpoint(TaskPriority::ReadSocket),
                    false);
        if (acknowledgements.sequence != message.get().asUnderlyingType().sequence) {
            TraceEvent(SevError, "StreamSequenceMismatch")
                .detail("Expected", acknowledgements.sequence)
                .detail("Actual", message.get().asUnderlyingType().sequence);
            ASSERT_WE_THINK(false);
            this->sendError(connection_failed());
        } else {
            acknowledgements.sequence++;
            if (this->shouldFireImmediately()) {
                // This message is going to be consumed by the client immediately (and therefore will not call
                // pop()) so send an ack immediately
                if (acknowledgements.getRawEndpoint().isValid()) {
                    acknowledgements.bytesAcknowledged += message.get().asUnderlyingType().expectedSize();
                    FlowTransport::transport().sendUnreliable(
                        SerializeSource<ErrorOr<AcknowledgementReply>>(
                            AcknowledgementReply(acknowledgements.bytesAcknowledged)),
                        acknowledgements.getEndpoint(TaskPriority::ReadSocket),
                        false);
                }
            }
        }

        this->send(std::move(message.get().asUnderlyingType()));
        this->send(std::move(message.get().asUnderlyingType()));
        }
    }
    this->delPromiseRef();
}
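The new uint16_t sequence gives each stream message a position, so the receiving queue can detect a lost or reordered message instead of delivering it silently; the real code raises a StreamSequenceMismatch trace event and fails the stream with connection_failed(). A standalone model of the check:

#include <cstdint>
#include <iostream>

struct StreamReceiver {
    uint16_t expected = 0;

    // Returns false on a gap, mirroring the sendError(connection_failed()) path above.
    bool deliver(uint16_t sequence) {
        if (sequence != expected) {
            std::cout << "StreamSequenceMismatch expected=" << expected
                      << " actual=" << sequence << "\n";
            return false;
        }
        expected++; // wraps at 65535, matching the uint16_t field added above
        return true;
    }
};

int main() {
    StreamReceiver r;
    r.deliver(0); // ok
    r.deliver(1); // ok
    r.deliver(3); // mismatch: message 2 was lost or reordered
}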
@@ -413,10 +423,14 @@ public:
    template <class U>
    void send(U&& value) const {
        if (queue->isRemoteEndpoint()) {
            if (queue->acknowledgements.failures.isError()) {
                throw queue->acknowledgements.failures.getError();
            }
            if (!queue->acknowledgements.getRawEndpoint().isValid()) {
                // register acknowledge receiver on sender and tell the receiver where to send acknowledge messages
                value.acknowledgeToken = queue->acknowledgements.getEndpoint(TaskPriority::ReadSocket).token;
            }
            value.sequence = queue->acknowledgements.sequence++;
            queue->acknowledgements.bytesSent += value.expectedSize();
            FlowTransport::transport().sendUnreliable(
                SerializeSource<ErrorOr<EnsureTable<T>>>(value), getEndpoint(), false);

@@ -782,8 +796,9 @@ public:
    const Endpoint& getEndpoint(TaskPriority taskID = TaskPriority::DefaultEndpoint) const {
        return queue->getEndpoint(taskID);
    }
    void makeWellKnownEndpoint(Endpoint::Token token, TaskPriority taskID) {
        queue->makeWellKnownEndpoint(token, taskID);

    void makeWellKnownEndpoint(uint64_t wlTokenID, TaskPriority taskID) {
        queue->makeWellKnownEndpoint(Endpoint::Token(-1, wlTokenID), taskID);
    }

    bool operator==(const RequestStream<T>& rhs) const { return queue == rhs.queue; }
@@ -154,7 +154,7 @@ private:

    KeyRef end = keyInfo->rangeContaining(k).end();
    KeyRangeRef insertRange(k, end);
    vector<UID> src, dest;
    std::vector<UID> src, dest;
    // txnStateStore is always an in-memory KVS, and must always be recovered before
    // applyMetadataMutations is called, so a wait here should never be needed.
    Future<RangeResult> fResult = txnStateStore->readRange(serverTagKeys);

@@ -261,7 +261,7 @@ private:
    }
    if (k != allKeys.end) {
        KeyRef end = cacheInfo->rangeContaining(k).end();
        vector<uint16_t> serverIndices;
        std::vector<uint16_t> serverIndices;
        decodeStorageCacheValue(m.param2, serverIndices);
        cacheInfo->insert(KeyRangeRef(k, end), serverIndices.size() > 0);
    }

@@ -904,7 +904,7 @@ private:

    std::map<KeyRef, MutationRef>::iterator itr;
    KeyRef keyBegin, keyEnd;
    vector<uint16_t> serverIndices;
    std::vector<uint16_t> serverIndices;
    MutationRef mutationBegin, mutationEnd;

    for (itr = cachedRangeInfo.begin(); itr != cachedRangeInfo.end(); ++itr) {
@@ -43,7 +43,7 @@ public:
    // savedVersion is used.
    void addBackupStatus(const WorkerBackupStatus& status);

    // Returns a map of tuple<Epoch, endVersion, logRouterTags> : map<tag, savedVersion>, so that
    // Returns a map of tuple<Epoch, endVersion, logRouterTags> : std::map<tag, savedVersion>, so that
    // the backup range should be [savedVersion + 1, endVersion) for the "tag" of the "Epoch".
    //
    // Specifically, the backup ranges for each old epoch are:
@@ -231,6 +231,7 @@ set(FDBSERVER_SRCS
  workloads/SlowTaskWorkload.actor.cpp
  workloads/SnapTest.actor.cpp
  workloads/SpecialKeySpaceCorrectness.actor.cpp
  workloads/StreamingRangeRead.actor.cpp
  workloads/StatusWorkload.actor.cpp
  workloads/Storefront.actor.cpp
  workloads/StreamingRead.actor.cpp
@@ -53,6 +53,12 @@

void failAfter(Future<Void> trigger, Endpoint e);

// This is used to artificially amplify the used count for processes
// occupied by non-singletons. This ultimately makes it less desirable
// for singletons to use those processes as well. This constant should
// be increased if we ever have more than 100 singletons (unlikely).
static const int PID_USED_AMP_FOR_NON_SINGLETON = 100;

struct WorkerInfo : NonCopyable {
    Future<Void> watcher;
    ReplyPromise<RegisterWorkerReply> reply;
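A standalone sketch of how such an amplified count could steer singleton placement — this is an assumption about usage, since the scoring code lives elsewhere in this file: a process already hosting non-singleton roles ends up with a used count of at least PID_USED_AMP_FOR_NON_SINGLETON, so an idle process always compares as less used.

#include <iostream>
#include <map>
#include <string>

static const int PID_USED_AMP_FOR_NON_SINGLETON = 100;

int main() {
    std::map<std::string, int> id_used;
    id_used["pid-A"] = 2 * PID_USED_AMP_FOR_NON_SINGLETON; // hosts two non-singleton roles
    id_used["pid-B"] = 0;                                  // idle process
    // A singleton recruiter that prefers the smallest used count picks pid-B here.
    std::cout << (id_used["pid-A"] < id_used["pid-B"] ? "pid-A" : "pid-B") << "\n";
}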
@@ -282,7 +288,7 @@ public:
                            DatabaseConfiguration const& conf,
                            Reference<IReplicationPolicy> const& policy,
                            Optional<Optional<Standalone<StringRef>>> const& dcId = Optional<Optional<Standalone<StringRef>>>()) {
    std::map<ProcessClass::Fitness, vector<WorkerDetails>> fitness_workers;
    std::map<ProcessClass::Fitness, std::vector<WorkerDetails>> fitness_workers;
    std::vector<WorkerDetails> results;
    Reference<LocalitySet> logServerSet = Reference<LocalitySet>(new LocalityMap<WorkerDetails>());
    LocalityMap<WorkerDetails>* logServerMap = (LocalityMap<WorkerDetails>*)logServerSet.getPtr();

@@ -506,7 +512,7 @@ public:
                          bool checkStable,
                          const std::set<Optional<Key>>& dcIds,
                          const std::vector<UID>& exclusionWorkerIds) {
    std::map<std::tuple<ProcessClass::Fitness, int, bool>, vector<WorkerDetails>> fitness_workers;
    std::map<std::tuple<ProcessClass::Fitness, int, bool>, std::vector<WorkerDetails>> fitness_workers;

    // Go through all the workers to list all the workers that can be recruited.
    for (const auto& [worker_process_id, worker_info] : id_worker) {

@@ -751,7 +757,7 @@ public:
                          bool checkStable,
                          const std::set<Optional<Key>>& dcIds,
                          const std::vector<UID>& exclusionWorkerIds) {
    std::map<std::tuple<ProcessClass::Fitness, int, bool, bool, bool>, vector<WorkerDetails>> fitness_workers;
    std::map<std::tuple<ProcessClass::Fitness, int, bool, bool, bool>, std::vector<WorkerDetails>> fitness_workers;

    // Go through all the workers to list all the workers that can be recruited.
    for (const auto& [worker_process_id, worker_info] : id_worker) {

@@ -888,7 +894,7 @@ public:
                          bool checkStable = false,
                          const std::set<Optional<Key>>& dcIds = std::set<Optional<Key>>(),
                          const std::vector<UID>& exclusionWorkerIds = {}) {
    std::map<std::tuple<ProcessClass::Fitness, int, bool, bool>, vector<WorkerDetails>> fitness_workers;
    std::map<std::tuple<ProcessClass::Fitness, int, bool, bool>, std::vector<WorkerDetails>> fitness_workers;
    std::vector<WorkerDetails> results;
    Reference<LocalitySet> logServerSet = Reference<LocalitySet>(new LocalityMap<WorkerDetails>());
    LocalityMap<WorkerDetails>* logServerMap = (LocalityMap<WorkerDetails>*)logServerSet.getPtr();

@@ -1341,7 +1347,7 @@ public:
                          std::map<Optional<Standalone<StringRef>>, int>& id_used,
                          std::map<Optional<Standalone<StringRef>>, int> preferredSharing = {},
                          bool checkStable = false) {
    std::map<std::tuple<ProcessClass::Fitness, int, bool, int>, vector<WorkerDetails>> fitness_workers;
    std::map<std::tuple<ProcessClass::Fitness, int, bool, int>, std::vector<WorkerDetails>> fitness_workers;

    for (auto& it : id_worker) {
        auto fitness = it.second.details.processClass.machineClassFitness(role);

@@ -1371,7 +1377,7 @@ public:
        throw no_more_servers();
    }

    vector<WorkerDetails> getWorkersForRoleInDatacenter(
    std::vector<WorkerDetails> getWorkersForRoleInDatacenter(
        Optional<Standalone<StringRef>> const& dcId,
        ProcessClass::ClusterRole role,
        int amount,

@@ -1380,8 +1386,8 @@ public:
        std::map<Optional<Standalone<StringRef>>, int> preferredSharing = {},
        Optional<WorkerFitnessInfo> minWorker = Optional<WorkerFitnessInfo>(),
        bool checkStable = false) {
        std::map<std::tuple<ProcessClass::Fitness, int, bool, int>, vector<WorkerDetails>> fitness_workers;
        vector<WorkerDetails> results;
        std::map<std::tuple<ProcessClass::Fitness, int, bool, int>, std::vector<WorkerDetails>> fitness_workers;
        std::vector<WorkerDetails> results;
        if (minWorker.present()) {
            results.push_back(minWorker.get().worker);
        }

@@ -1444,7 +1450,7 @@ public:
      : bestFit(ProcessClass::NeverAssign), worstFit(ProcessClass::NeverAssign), role(ProcessClass::NoRole),
        count(0) {}

    RoleFitness(const vector<WorkerDetails>& workers,
    RoleFitness(const std::vector<WorkerDetails>& workers,
                ProcessClass::ClusterRole role,
                const std::map<Optional<Standalone<StringRef>>, int>& id_used)
      : role(role) {

@@ -1771,7 +1777,7 @@ public:
        try {
            auto reply = findWorkersForConfigurationFromDC(req, regions[0].dcId);
            setPrimaryDesired = true;
            vector<Optional<Key>> dcPriority;
            std::vector<Optional<Key>> dcPriority;
            dcPriority.push_back(regions[0].dcId);
            dcPriority.push_back(regions[1].dcId);
            desiredDcIds.set(dcPriority);

@@ -1798,7 +1804,7 @@ public:
                .error(e);
            auto reply = findWorkersForConfigurationFromDC(req, regions[1].dcId);
            if (!setPrimaryDesired) {
                vector<Optional<Key>> dcPriority;
                std::vector<Optional<Key>> dcPriority;
                dcPriority.push_back(regions[1].dcId);
                dcPriority.push_back(regions[0].dcId);
                desiredDcIds.set(dcPriority);

@@ -1811,7 +1817,7 @@ public:
                throw;
            }
        } else if (req.configuration.regions.size() == 1) {
            vector<Optional<Key>> dcPriority;
            std::vector<Optional<Key>> dcPriority;
            dcPriority.push_back(req.configuration.regions[0].dcId);
            desiredDcIds.set(dcPriority);
            auto reply = findWorkersForConfigurationFromDC(req, req.configuration.regions[0].dcId);

@@ -1965,13 +1971,13 @@ public:

        if (bestDC != clusterControllerDcId) {
            TraceEvent("BestDCIsNotClusterDC").log();
            vector<Optional<Key>> dcPriority;
            std::vector<Optional<Key>> dcPriority;
            dcPriority.push_back(bestDC);
            desiredDcIds.set(dcPriority);
            throw no_more_servers();
        }
        // If this cluster controller dies, do not prioritize recruiting the next one in the same DC
        desiredDcIds.set(vector<Optional<Key>>());
        desiredDcIds.set(std::vector<Optional<Key>>());
        TraceEvent("FindWorkersForConfig")
            .detail("Replication", req.configuration.tLogReplicationFactor)
            .detail("DesiredLogs", req.configuration.getDesiredLogs())

@@ -2173,7 +2179,7 @@ public:
        getWorkerForRoleInDatacenter(
            regions[0].dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, db.config, id_used, {}, true);

        vector<Optional<Key>> dcPriority;
        std::vector<Optional<Key>> dcPriority;
        dcPriority.push_back(regions[0].dcId);
        dcPriority.push_back(regions[1].dcId);
        desiredDcIds.set(dcPriority);

@@ -2200,7 +2206,8 @@ public:
        }
    }

    void updateIdUsed(const vector<WorkerDetails>& workers, std::map<Optional<Standalone<StringRef>>, int>& id_used) {
    void updateIdUsed(const std::vector<WorkerDetails>& workers,
                      std::map<Optional<Standalone<StringRef>>, int>& id_used) {
        for (auto& it : workers) {
            id_used[it.interf.locality.processId()]++;
        }
@@ -2711,7 +2718,9 @@ public:
        return false;
    }

    bool isUsedNotMaster(Optional<Key> processId) {
    // Returns true iff processId is currently being used
    // for any non-singleton role other than master
    bool isUsedNotMaster(Optional<Key> processId) const {
        ASSERT(masterProcessId.present());
        if (processId == masterProcessId)
            return false;

@@ -2741,7 +2750,10 @@ public:
        return false;
    }

    bool onMasterIsBetter(const WorkerDetails& worker, ProcessClass::ClusterRole role) {
    // Returns true iff
    //  - role is master, or
    //  - role is a singleton AND worker's pid is being used for any non-singleton role
    bool onMasterIsBetter(const WorkerDetails& worker, ProcessClass::ClusterRole role) const {
        ASSERT(masterProcessId.present());
        const auto& pid = worker.interf.locality.processId();
        if ((role != ProcessClass::DataDistributor && role != ProcessClass::Ratekeeper) ||

@@ -2751,6 +2763,7 @@ public:
        return isUsedNotMaster(pid);
    }

    // Returns a map of <pid, numRolesUsingPid> for all non-singleton roles
    std::map<Optional<Standalone<StringRef>>, int> getUsedIds() {
        std::map<Optional<Standalone<StringRef>>, int> idUsed;
        updateKnownIds(&idUsed);
@@ -2997,10 +3010,10 @@ public:
    Optional<Standalone<StringRef>> masterProcessId;
    Optional<Standalone<StringRef>> clusterControllerProcessId;
    Optional<Standalone<StringRef>> clusterControllerDcId;
    AsyncVar<Optional<vector<Optional<Key>>>> desiredDcIds; // desired DC priorities
    AsyncVar<std::pair<bool, Optional<vector<Optional<Key>>>>>
    AsyncVar<Optional<std::vector<Optional<Key>>>> desiredDcIds; // desired DC priorities
    AsyncVar<std::pair<bool, Optional<std::vector<Optional<Key>>>>>
        changingDcIds; // current DC priorities to change first, and whether that is the cluster controller
    AsyncVar<std::pair<bool, Optional<vector<Optional<Key>>>>>
    AsyncVar<std::pair<bool, Optional<std::vector<Optional<Key>>>>>
        changedDcIds; // current DC priorities to change second, and whether the cluster controller has been changed
    UID id;
    std::vector<RecruitFromConfigurationRequest> outstandingRecruitmentRequests;

@@ -3022,9 +3035,14 @@ public:
    Version datacenterVersionDifference;
    PromiseStream<Future<Void>> addActor;
    bool versionDifferenceUpdated;
    bool recruitingDistributor;
    Optional<UID> recruitingRatekeeperID;

    // recruitX is used to signal when role X needs to be (re)recruited.
    // recruitingXID is used to track the ID of X's interface which is being recruited.
    // We use AsyncVars to kill (i.e. halt) singletons that have been replaced.
    AsyncVar<bool> recruitDistributor;
    Optional<UID> recruitingDistributorID;
    AsyncVar<bool> recruitRatekeeper;
    Optional<UID> recruitingRatekeeperID;

    // Stores the health information from a particular worker's perspective.
    struct WorkerHealth {

@@ -3060,7 +3078,7 @@ public:
        clusterControllerDcId(locality.dcId()), id(ccInterface.id()), ac(false), outstandingRequestChecker(Void()),
        outstandingRemoteRequestChecker(Void()), startTime(now()), goodRecruitmentTime(Never()),
        goodRemoteRecruitmentTime(Never()), datacenterVersionDifference(0), versionDifferenceUpdated(false),
        recruitingDistributor(false), recruitRatekeeper(false),
        recruitDistributor(false), recruitRatekeeper(false),
        clusterControllerMetrics("ClusterController", id.toString()),
        openDatabaseRequests("OpenDatabaseRequests", clusterControllerMetrics),
        registerWorkerRequests("RegisterWorkerRequests", clusterControllerMetrics),
@ -3084,6 +3102,63 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
// Wrapper for singleton interfaces
|
||||
template <class Interface>
|
||||
struct Singleton {
|
||||
const Optional<Interface>& interface;
|
||||
|
||||
Singleton(const Optional<Interface>& interface) : interface(interface) {}
|
||||
|
||||
virtual Role getRole() const = 0;
|
||||
virtual ProcessClass::ClusterRole getClusterRole() const = 0;
|
||||
|
||||
virtual void setInterfaceToDbInfo(ClusterControllerData* cc) const = 0;
|
||||
virtual void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const = 0;
|
||||
virtual void recruit(ClusterControllerData* cc) const = 0;
|
||||
};
|
||||
|
||||
struct RatekeeperSingleton : Singleton<RatekeeperInterface> {
|
||||
|
||||
RatekeeperSingleton(const Optional<RatekeeperInterface>& interface) : Singleton(interface) {}
|
||||
|
||||
Role getRole() const { return Role::RATEKEEPER; }
|
||||
ProcessClass::ClusterRole getClusterRole() const { return ProcessClass::Ratekeeper; }
|
||||
|
||||
void setInterfaceToDbInfo(ClusterControllerData* cc) const {
|
||||
if (interface.present()) {
|
||||
cc->db.setRatekeeper(interface.get());
|
||||
}
|
||||
}
|
||||
void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const {
|
||||
if (interface.present()) {
|
||||
cc->id_worker[pid].haltRatekeeper =
|
||||
brokenPromiseToNever(interface.get().haltRatekeeper.getReply(HaltRatekeeperRequest(cc->id)));
|
||||
}
|
||||
}
|
||||
void recruit(ClusterControllerData* cc) const { cc->recruitRatekeeper.set(true); }
|
||||
};
|
||||
|
||||
struct DataDistributorSingleton : Singleton<DataDistributorInterface> {
|
||||
|
||||
DataDistributorSingleton(const Optional<DataDistributorInterface>& interface) : Singleton(interface) {}
|
||||
|
||||
Role getRole() const { return Role::DATA_DISTRIBUTOR; }
|
||||
ProcessClass::ClusterRole getClusterRole() const { return ProcessClass::DataDistributor; }
|
||||
|
||||
void setInterfaceToDbInfo(ClusterControllerData* cc) const {
|
||||
if (interface.present()) {
|
||||
cc->db.setDistributor(interface.get());
|
||||
}
|
||||
}
|
||||
void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const {
|
||||
if (interface.present()) {
|
||||
cc->id_worker[pid].haltDistributor =
|
||||
brokenPromiseToNever(interface.get().haltDataDistributor.getReply(HaltDataDistributorRequest(cc->id)));
|
||||
}
|
||||
}
|
||||
void recruit(ClusterControllerData* cc) const { cc->recruitDistributor.set(true); }
|
||||
};
|
||||
|
||||
ACTOR Future<Void> clusterWatchDatabase(ClusterControllerData* cluster, ClusterControllerData::DBInfo* db) {
|
||||
state MasterInterface iMaster;
|
||||
|
||||
|
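The point of the wrapper added above is that halt/recruit handling can be written once against Singleton<Interface> instead of once per role. A toy, self-contained sketch of the same pattern in standard C++ (std::optional stands in for Optional, and RatekeeperLike is a made-up type; this is an illustration, not FDB code):

#include <iostream>
#include <optional>
#include <string>

template <class Interface>
struct SingletonLike {
	const std::optional<Interface>& interface;
	SingletonLike(const std::optional<Interface>& interface) : interface(interface) {}
	virtual std::string role() const = 0;
	virtual void recruit() const = 0;
	virtual ~SingletonLike() = default;
};

struct RatekeeperLike { int id = 1; };

struct RatekeeperSingletonLike : SingletonLike<RatekeeperLike> {
	using SingletonLike::SingletonLike;
	std::string role() const override { return "Ratekeeper"; }
	void recruit() const override { std::cout << "recruit " << role() << "\n"; }
};

int main() {
	std::optional<RatekeeperLike> rk; // interface not yet published
	RatekeeperSingletonLike s(rk);
	s.recruit(); // generic callers never name the concrete role
}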
@@ -3295,108 +3370,160 @@ void checkOutstandingStorageRequests(ClusterControllerData* self) {
}
}

void checkBetterDDOrRK(ClusterControllerData* self) {
// Finds and returns a new process for role
WorkerDetails findNewProcessForSingleton(ClusterControllerData* self,
const ProcessClass::ClusterRole role,
std::map<Optional<Standalone<StringRef>>, int>& id_used) {
// find new process in cluster for role
WorkerDetails newWorker =
self->getWorkerForRoleInDatacenter(
self->clusterControllerDcId, role, ProcessClass::NeverAssign, self->db.config, id_used, {}, true)
.worker;

// check if master's process is actually better suited for role
if (self->onMasterIsBetter(newWorker, role)) {
newWorker = self->id_worker[self->masterProcessId.get()].details;
}

// acknowledge that the pid is now potentially used by this role as well
id_used[newWorker.interf.locality.processId()]++;

return newWorker;
}

// Return best possible fitness for singleton. Note that lower fitness is better.
ProcessClass::Fitness findBestFitnessForSingleton(const ClusterControllerData* self,
const WorkerDetails& worker,
const ProcessClass::ClusterRole& role) {
auto bestFitness = worker.processClass.machineClassFitness(role);
// If the process has been marked as excluded, we take the max with ExcludeFit to ensure its fit
// is at least as bad as ExcludeFit. This assists with successfully offboarding such processes
// and removing them from the cluster.
if (self->db.config.isExcludedServer(worker.interf.addresses())) {
bestFitness = std::max(bestFitness, ProcessClass::ExcludeFit);
}
return bestFitness;
}

// Returns true iff the singleton is healthy. "Healthy" here means that
// the singleton is stable (see below) and doesn't need to be rerecruited.
// Side effects: (possibly) initiates recruitment
template <class Interface>
bool isHealthySingleton(ClusterControllerData* self,
const WorkerDetails& newWorker,
const Singleton<Interface>& singleton,
const ProcessClass::Fitness& bestFitness,
const Optional<UID> recruitingID) {
// A singleton is stable if it exists in the cluster, has not been killed off of its process, and is not being recruited
bool isStableSingleton = singleton.interface.present() &&
self->id_worker.count(singleton.interface.get().locality.processId()) &&
(!recruitingID.present() || (recruitingID.get() == singleton.interface.get().id()));

if (!isStableSingleton) {
return false; // not healthy because unstable
}

auto& currWorker = self->id_worker[singleton.interface.get().locality.processId()];
auto currFitness = currWorker.details.processClass.machineClassFitness(singleton.getClusterRole());
if (currWorker.priorityInfo.isExcluded) {
currFitness = ProcessClass::ExcludeFit;
}
// If any of the following conditions are met, we will switch the singleton's process:
// - if the current proc is used by some non-master, non-singleton role
// - if the current fitness is less than optimal (lower fitness is better)
// - if currently at peak fitness but on same process as master, and the new worker is on different process
bool shouldRerecruit =
self->isUsedNotMaster(currWorker.details.interf.locality.processId()) || bestFitness < currFitness ||
(currFitness == bestFitness && currWorker.details.interf.locality.processId() == self->masterProcessId &&
newWorker.interf.locality.processId() != self->masterProcessId);
if (shouldRerecruit) {
std::string roleAbbr = singleton.getRole().abbreviation;
TraceEvent(("CCHalt" + roleAbbr).c_str(), self->id)
.detail(roleAbbr + "ID", singleton.interface.get().id())
.detail("Excluded", currWorker.priorityInfo.isExcluded)
.detail("Fitness", currFitness)
.detail("BestFitness", bestFitness);
singleton.recruit(self); // SIDE EFFECT: initiating recruitment
return false; // not healthy since needed to be rerecruited
} else {
return true; // healthy because doesn't need to be rerecruited
}
}

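For intuition about the predicate above, a self-contained sketch with made-up fitness values (the enum below is illustrative and not FDB's actual ProcessClass numbering; lower is better):

#include <iostream>

enum Fitness { BestFit = 0, GoodFit = 2, ExcludeFit = 5 }; // illustrative stand-ins

int main() {
	Fitness bestFitness = GoodFit; // best candidate available in the cluster
	Fitness currFitness = ExcludeFit; // current process was excluded
	bool usedByNonMasterRole = false;
	bool colocatedWithMasterButMovable = false;

	// An excluded (worst-fit) singleton loses to any better candidate, so it is rerecruited.
	bool shouldRerecruit = usedByNonMasterRole || bestFitness < currFitness || colocatedWithMasterButMovable;
	std::cout << std::boolalpha << shouldRerecruit << "\n"; // true
}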
// Returns a mapping from pid->pidCount for pids
std::map<Optional<Standalone<StringRef>>, int> getColocCounts(
const std::vector<Optional<Standalone<StringRef>>>& pids) {
std::map<Optional<Standalone<StringRef>>, int> counts;
for (const auto& pid : pids) {
++counts[pid];
}
return counts;
}

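A worked example of the counting above, with plain strings standing in for Optional<Standalone<StringRef>> process IDs (a sketch, not FDB code): if Ratekeeper and DataDistributor share one process, that pid's colocation count is 2.

#include <iostream>
#include <map>
#include <string>
#include <vector>

std::map<std::string, int> colocCounts(const std::vector<std::string>& pids) {
	std::map<std::string, int> counts;
	for (const auto& pid : pids) {
		++counts[pid]; // same counting step as getColocCounts above
	}
	return counts;
}

int main() {
	auto counts = colocCounts({ "proc-a", "proc-a" }); // hypothetical pids
	std::cout << counts["proc-a"] << "\n"; // 2
}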
// Checks if there exists a better process for each singleton (e.g. DD) compared
// to the process it is currently on.
void checkBetterSingletons(ClusterControllerData* self) {
if (!self->masterProcessId.present() ||
self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
return;
}

// note: this map doesn't consider pids used by existing singletons
std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
WorkerDetails newRKWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
ProcessClass::Ratekeeper,
ProcessClass::NeverAssign,
self->db.config,
id_used,
{},
true)
.worker;
if (self->onMasterIsBetter(newRKWorker, ProcessClass::Ratekeeper)) {
newRKWorker = self->id_worker[self->masterProcessId.get()].details;
}
id_used = self->getUsedIds();
for (auto& it : id_used) {
it.second *= 2;
}
id_used[newRKWorker.interf.locality.processId()]++;
WorkerDetails newDDWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
ProcessClass::DataDistributor,
ProcessClass::NeverAssign,
self->db.config,
id_used,
{},
true)
.worker;
if (self->onMasterIsBetter(newDDWorker, ProcessClass::DataDistributor)) {
newDDWorker = self->id_worker[self->masterProcessId.get()].details;
}
auto bestFitnessForRK = newRKWorker.processClass.machineClassFitness(ProcessClass::Ratekeeper);
if (self->db.config.isExcludedServer(newRKWorker.interf.addresses())) {
bestFitnessForRK = std::max(bestFitnessForRK, ProcessClass::ExcludeFit);
}
auto bestFitnessForDD = newDDWorker.processClass.machineClassFitness(ProcessClass::DataDistributor);
if (self->db.config.isExcludedServer(newDDWorker.interf.addresses())) {
bestFitnessForDD = std::max(bestFitnessForDD, ProcessClass::ExcludeFit);
}
//TraceEvent("CheckBetterDDorRKNewRecruits", self->id).detail("MasterProcessId", self->masterProcessId)
//.detail("NewRecruitRKProcessId", newRKWorker.interf.locality.processId()).detail("NewRecruiteDDProcessId",
// newDDWorker.interf.locality.processId());

Optional<Standalone<StringRef>> currentRKProcessId;
Optional<Standalone<StringRef>> currentDDProcessId;
// We prefer spreading out other roles more than separating singletons on their own process
// so we artificially amplify the pid count for the processes used by non-singleton roles.
// In other words, we make the processes used for other roles less desirable to be used
// by singletons as well.
for (auto& it : id_used) {
it.second *= PID_USED_AMP_FOR_NON_SINGLETON;
}

// Try to find a new process for each singleton.
WorkerDetails newRKWorker = findNewProcessForSingleton(self, ProcessClass::Ratekeeper, id_used);
WorkerDetails newDDWorker = findNewProcessForSingleton(self, ProcessClass::DataDistributor, id_used);

// Find best possible fitnesses for each singleton.
auto bestFitnessForRK = findBestFitnessForSingleton(self, newRKWorker, ProcessClass::Ratekeeper);
auto bestFitnessForDD = findBestFitnessForSingleton(self, newDDWorker, ProcessClass::DataDistributor);

auto& db = self->db.serverInfo->get();
bool ratekeeperHealthy = false;
if (db.ratekeeper.present() && self->id_worker.count(db.ratekeeper.get().locality.processId()) &&
(!self->recruitingRatekeeperID.present() || (self->recruitingRatekeeperID.get() == db.ratekeeper.get().id()))) {
auto& rkWorker = self->id_worker[db.ratekeeper.get().locality.processId()];
currentRKProcessId = rkWorker.details.interf.locality.processId();
auto rkFitness = rkWorker.details.processClass.machineClassFitness(ProcessClass::Ratekeeper);
if (rkWorker.priorityInfo.isExcluded) {
rkFitness = ProcessClass::ExcludeFit;
}
if (self->isUsedNotMaster(rkWorker.details.interf.locality.processId()) || bestFitnessForRK < rkFitness ||
(rkFitness == bestFitnessForRK && rkWorker.details.interf.locality.processId() == self->masterProcessId &&
newRKWorker.interf.locality.processId() != self->masterProcessId)) {
TraceEvent("CCHaltRK", self->id)
.detail("RKID", db.ratekeeper.get().id())
.detail("Excluded", rkWorker.priorityInfo.isExcluded)
.detail("Fitness", rkFitness)
.detail("BestFitness", bestFitnessForRK);
self->recruitRatekeeper.set(true);
} else {
ratekeeperHealthy = true;
}
auto rkSingleton = RatekeeperSingleton(db.ratekeeper);
auto ddSingleton = DataDistributorSingleton(db.distributor);

// Check if the singletons are healthy.
// side effect: try to rerecruit the singletons to more optimal processes
bool rkHealthy = isHealthySingleton<RatekeeperInterface>(
self, newRKWorker, rkSingleton, bestFitnessForRK, self->recruitingRatekeeperID);

bool ddHealthy = isHealthySingleton<DataDistributorInterface>(
self, newDDWorker, ddSingleton, bestFitnessForDD, self->recruitingDistributorID);

// if any of the singletons are unhealthy (rerecruited or not stable), then do not
// consider any further re-recruitments
if (!(rkHealthy && ddHealthy)) {
return;
}

if (!self->recruitingDistributor && db.distributor.present() &&
self->id_worker.count(db.distributor.get().locality.processId())) {
auto& ddWorker = self->id_worker[db.distributor.get().locality.processId()];
auto ddFitness = ddWorker.details.processClass.machineClassFitness(ProcessClass::DataDistributor);
currentDDProcessId = ddWorker.details.interf.locality.processId();
if (ddWorker.priorityInfo.isExcluded) {
ddFitness = ProcessClass::ExcludeFit;
}
if (self->isUsedNotMaster(ddWorker.details.interf.locality.processId()) || bestFitnessForDD < ddFitness ||
(ddFitness == bestFitnessForDD && ddWorker.details.interf.locality.processId() == self->masterProcessId &&
newDDWorker.interf.locality.processId() != self->masterProcessId) ||
(ddFitness == bestFitnessForDD &&
newRKWorker.interf.locality.processId() != newDDWorker.interf.locality.processId() && ratekeeperHealthy &&
currentRKProcessId.present() && currentDDProcessId == currentRKProcessId &&
(newRKWorker.interf.locality.processId() != self->masterProcessId &&
newDDWorker.interf.locality.processId() != self->masterProcessId))) {
TraceEvent("CCHaltDD", self->id)
.detail("DDID", db.distributor.get().id())
.detail("Excluded", ddWorker.priorityInfo.isExcluded)
.detail("Fitness", ddFitness)
.detail("BestFitness", bestFitnessForDD)
.detail("CurrentRateKeeperProcessId",
currentRKProcessId.present() ? currentRKProcessId.get() : LiteralStringRef("None"))
.detail("CurrentDDProcessId", currentDDProcessId)
.detail("MasterProcessID", self->masterProcessId)
.detail("NewRKWorkers", newRKWorker.interf.locality.processId())
.detail("NewDDWorker", newDDWorker.interf.locality.processId());
ddWorker.haltDistributor = brokenPromiseToNever(
db.distributor.get().haltDataDistributor.getReply(HaltDataDistributorRequest(self->id)));
// if we reach here, we know that the singletons are healthy so let's
// check if we can colocate the singletons in a more optimal way

Optional<Standalone<StringRef>> currRKProcessId = rkSingleton.interface.get().locality.processId();
Optional<Standalone<StringRef>> currDDProcessId = ddSingleton.interface.get().locality.processId();
Optional<Standalone<StringRef>> newRKProcessId = newRKWorker.interf.locality.processId();
Optional<Standalone<StringRef>> newDDProcessId = newDDWorker.interf.locality.processId();

auto currColocMap = getColocCounts({ currRKProcessId, currDDProcessId });
auto newColocMap = getColocCounts({ newRKProcessId, newDDProcessId });

// if the new coloc counts are not worse (i.e. each singleton's coloc count has not increased)
if (newColocMap[newRKProcessId] <= currColocMap[currRKProcessId] &&
newColocMap[newDDProcessId] <= currColocMap[currDDProcessId]) {
// rerecruit the singleton for which we have found a better process, if any
if (newColocMap[newRKProcessId] < currColocMap[currRKProcessId]) {
rkSingleton.recruit(self);
} else if (newColocMap[newDDProcessId] < currColocMap[currDDProcessId]) {
ddSingleton.recruit(self);
}
}
}
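Concretely, with hypothetical pids: if both singletons currently share proc-a (colocation count 2 each) and the proposed placement puts RK on proc-b and DD on proc-c (count 1 each), no count gets worse and RK's strictly improves, so the first branch above rerecruits RK; DD is handled on a later pass. A minimal check of that arithmetic:

#include <cassert>
#include <map>
#include <string>

int main() {
	std::map<std::string, int> curr{ { "proc-a", 2 } }; // both singletons on proc-a
	std::map<std::string, int> next{ { "proc-b", 1 }, { "proc-c", 1 } }; // proposed placement
	assert(next["proc-b"] <= curr["proc-a"] && next["proc-c"] <= curr["proc-a"]); // not worse
	assert(next["proc-b"] < curr["proc-a"]); // strictly better, so RK is rerecruited first
	return 0;
}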
@@ -3410,7 +3537,7 @@ ACTOR Future<Void> doCheckOutstandingRequests(ClusterControllerData* self) {

checkOutstandingRecruitmentRequests(self);
checkOutstandingStorageRequests(self);
checkBetterDDOrRK(self);
checkBetterSingletons(self);

self->checkRecoveryStalled();
if (self->betterMasterExists()) {

@@ -3538,11 +3665,11 @@ struct FailureStatusInfo {
}
};

ACTOR Future<vector<TLogInterface>> requireAll(vector<Future<Optional<vector<TLogInterface>>>> in) {
state vector<TLogInterface> out;
ACTOR Future<std::vector<TLogInterface>> requireAll(std::vector<Future<Optional<std::vector<TLogInterface>>>> in) {
state std::vector<TLogInterface> out;
state int i;
for (i = 0; i < in.size(); i++) {
Optional<vector<TLogInterface>> x = wait(in[i]);
Optional<std::vector<TLogInterface>> x = wait(in[i]);
if (!x.present())
throw recruitment_failed();
out.insert(out.end(), x.get().begin(), x.get().end());
@@ -3740,6 +3867,47 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co
checkOutstandingRequests(self);
}

// Halts the registering (i.e. requesting) singleton if one is already in the process of being recruited,
// or halts the existing singleton in favour of the requesting one
template <class Interface>
void haltRegisteringOrCurrentSingleton(ClusterControllerData* self,
const WorkerInterface& worker,
const Singleton<Interface>& currSingleton,
const Singleton<Interface>& registeringSingleton,
const Optional<UID> recruitingID) {
ASSERT(currSingleton.getRole() == registeringSingleton.getRole());
const UID registeringID = registeringSingleton.interface.get().id();
const std::string roleName = currSingleton.getRole().roleName;
const std::string roleAbbr = currSingleton.getRole().abbreviation;

// halt the requesting singleton if it isn't the one currently being recruited
if ((recruitingID.present() && recruitingID.get() != registeringID) ||
self->clusterControllerDcId != worker.locality.dcId()) {
TraceEvent(("CCHaltRegistering" + roleName).c_str(), self->id)
.detail(roleAbbr + "ID", registeringID)
.detail("DcID", printable(self->clusterControllerDcId))
.detail("ReqDcID", printable(worker.locality.dcId()))
.detail("Recruiting" + roleAbbr + "ID", recruitingID.present() ? recruitingID.get() : UID());
registeringSingleton.halt(self, worker.locality.processId());
} else if (!recruitingID.present()) {
// if not currently recruiting, then halt the previous one in favour of the requesting one
TraceEvent(("CCRegister" + roleName).c_str(), self->id).detail(roleAbbr + "ID", registeringID);
if (currSingleton.interface.present() && currSingleton.interface.get().id() != registeringID &&
self->id_worker.count(currSingleton.interface.get().locality.processId())) {
TraceEvent(("CCHaltPrevious" + roleName).c_str(), self->id)
.detail(roleAbbr + "ID", currSingleton.interface.get().id())
.detail("DcID", printable(self->clusterControllerDcId))
.detail("ReqDcID", printable(worker.locality.dcId()))
.detail("Recruiting" + roleAbbr + "ID", recruitingID.present() ? recruitingID.get() : UID());
currSingleton.halt(self, currSingleton.interface.get().locality.processId());
}
// set the curr singleton if it doesn't exist or it's different from the requesting one
if (!currSingleton.interface.present() || currSingleton.interface.get().id() != registeringID) {
registeringSingleton.setInterfaceToDbInfo(self);
}
}
}

void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, ConfigBroadcaster* configBroadcaster) {
const WorkerInterface& w = req.wi;
ProcessClass newProcessClass = req.processClass;
@@ -3866,43 +4034,21 @@ void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, Conf
TEST(true); // Received an old worker registration request.
}

if (req.distributorInterf.present() && !self->db.serverInfo->get().distributor.present() &&
self->clusterControllerDcId == req.distributorInterf.get().locality.dcId() && !self->recruitingDistributor) {
const DataDistributorInterface& di = req.distributorInterf.get();
TraceEvent("CCRegisterDataDistributor", self->id).detail("DDID", di.id());
self->db.setDistributor(di);
// For each singleton
// - if the registering singleton conflicts with the singleton being recruited, kill the registering one
// - if the singleton is not being recruited, kill the existing one in favour of the registering one
if (req.distributorInterf.present()) {
auto currSingleton = DataDistributorSingleton(self->db.serverInfo->get().distributor);
auto registeringSingleton = DataDistributorSingleton(req.distributorInterf);
haltRegisteringOrCurrentSingleton<DataDistributorInterface>(
self, w, currSingleton, registeringSingleton, self->recruitingDistributorID);
}

if (req.ratekeeperInterf.present()) {
if ((self->recruitingRatekeeperID.present() &&
self->recruitingRatekeeperID.get() != req.ratekeeperInterf.get().id()) ||
self->clusterControllerDcId != w.locality.dcId()) {
TraceEvent("CCHaltRegisteringRatekeeper", self->id)
.detail("RKID", req.ratekeeperInterf.get().id())
.detail("DcID", printable(self->clusterControllerDcId))
.detail("ReqDcID", printable(w.locality.dcId()))
.detail("RecruitingRKID",
self->recruitingRatekeeperID.present() ? self->recruitingRatekeeperID.get() : UID());
self->id_worker[w.locality.processId()].haltRatekeeper = brokenPromiseToNever(
req.ratekeeperInterf.get().haltRatekeeper.getReply(HaltRatekeeperRequest(self->id)));
} else if (!self->recruitingRatekeeperID.present()) {
const RatekeeperInterface& rki = req.ratekeeperInterf.get();
const auto& ratekeeper = self->db.serverInfo->get().ratekeeper;
TraceEvent("CCRegisterRatekeeper", self->id).detail("RKID", rki.id());
if (ratekeeper.present() && ratekeeper.get().id() != rki.id() &&
self->id_worker.count(ratekeeper.get().locality.processId())) {
TraceEvent("CCHaltPreviousRatekeeper", self->id)
.detail("RKID", ratekeeper.get().id())
.detail("DcID", printable(self->clusterControllerDcId))
.detail("ReqDcID", printable(w.locality.dcId()))
.detail("RecruitingRKID",
self->recruitingRatekeeperID.present() ? self->recruitingRatekeeperID.get() : UID());
self->id_worker[ratekeeper.get().locality.processId()].haltRatekeeper =
brokenPromiseToNever(ratekeeper.get().haltRatekeeper.getReply(HaltRatekeeperRequest(self->id)));
}
if (!ratekeeper.present() || ratekeeper.get().id() != rki.id()) {
self->db.setRatekeeper(rki);
}
}
auto currSingleton = RatekeeperSingleton(self->db.serverInfo->get().ratekeeper);
auto registeringSingleton = RatekeeperSingleton(req.ratekeeperInterf);
haltRegisteringOrCurrentSingleton<RatekeeperInterface>(
self, w, currSingleton, registeringSingleton, self->recruitingRatekeeperID);
}

// Notify the worker to register again with new process class/exclusive property
@@ -4026,7 +4172,7 @@ ACTOR Future<Void> statusServer(FutureStream<StatusRequest> requests,
}

// Get status but trap errors to send back to client.
vector<WorkerDetails> workers;
std::vector<WorkerDetails> workers;
std::vector<ProcessIssues> workerIssues;

for (auto& it : self->id_worker) {

@@ -4521,7 +4667,7 @@ ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterCo
wait(fCommit || delay(SERVER_KNOBS->FORCE_RECOVERY_CHECK_DELAY));
if (!fCommit.isReady() || fCommit.isError()) {
if (self->clusterControllerDcId != req.dcId) {
vector<Optional<Key>> dcPriority;
std::vector<Optional<Key>> dcPriority;
dcPriority.push_back(req.dcId);
dcPriority.push_back(self->clusterControllerDcId);
self->desiredDcIds.set(dcPriority);
@@ -4537,41 +4683,64 @@ ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterCo
}
}

ACTOR Future<DataDistributorInterface> startDataDistributor(ClusterControllerData* self) {
ACTOR Future<Void> startDataDistributor(ClusterControllerData* self) {
wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.

TraceEvent("CCStartDataDistributor", self->id).log();
loop {
try {
state bool no_distributor = !self->db.serverInfo->get().distributor.present();
state bool noDistributor = !self->db.serverInfo->get().distributor.present();
while (!self->masterProcessId.present() ||
self->masterProcessId != self->db.serverInfo->get().master.locality.processId() ||
self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
wait(self->db.serverInfo->onChange() || delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
}
if (no_distributor && self->db.serverInfo->get().distributor.present()) {
return self->db.serverInfo->get().distributor.get();
if (noDistributor && self->db.serverInfo->get().distributor.present()) {
// Existing distributor registers while waiting, so skip.
return Void();
}

std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
WorkerFitnessInfo data_distributor = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
ProcessClass::DataDistributor,
ProcessClass::NeverAssign,
self->db.config,
id_used);
state WorkerDetails worker = data_distributor.worker;
std::map<Optional<Standalone<StringRef>>, int> idUsed = self->getUsedIds();
WorkerFitnessInfo ddWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
ProcessClass::DataDistributor,
ProcessClass::NeverAssign,
self->db.config,
idUsed);
InitializeDataDistributorRequest req(deterministicRandom()->randomUniqueID());
state WorkerDetails worker = ddWorker.worker;
if (self->onMasterIsBetter(worker, ProcessClass::DataDistributor)) {
worker = self->id_worker[self->masterProcessId.get()].details;
}

InitializeDataDistributorRequest req(deterministicRandom()->randomUniqueID());
TraceEvent("CCDataDistributorRecruit", self->id).detail("Addr", worker.interf.address());
self->recruitingDistributorID = req.reqId;
TraceEvent("CCRecruitDataDistributor", self->id)
.detail("Addr", worker.interf.address())
.detail("DDID", req.reqId);

ErrorOr<DataDistributorInterface> distributor = wait(worker.interf.dataDistributor.getReplyUnlessFailedFor(
ErrorOr<DataDistributorInterface> ddInterf = wait(worker.interf.dataDistributor.getReplyUnlessFailedFor(
req, SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 0));
if (distributor.present()) {
TraceEvent("CCDataDistributorRecruited", self->id).detail("Addr", worker.interf.address());
return distributor.get();

if (ddInterf.present()) {
self->recruitDistributor.set(false);
self->recruitingDistributorID = ddInterf.get().id();
const auto& distributor = self->db.serverInfo->get().distributor;
TraceEvent("CCDataDistributorRecruited", self->id)
.detail("Addr", worker.interf.address())
.detail("DDID", ddInterf.get().id());
if (distributor.present() && distributor.get().id() != ddInterf.get().id() &&
self->id_worker.count(distributor.get().locality.processId())) {

TraceEvent("CCHaltDataDistributorAfterRecruit", self->id)
.detail("DDID", distributor.get().id())
.detail("DcID", printable(self->clusterControllerDcId));

DataDistributorSingleton(distributor).halt(self, distributor.get().locality.processId());
}
if (!distributor.present() || distributor.get().id() != ddInterf.get().id()) {
self->db.setDistributor(ddInterf.get());
}
checkOutstandingRequests(self);
return Void();
}
} catch (Error& e) {
TraceEvent("CCDataDistributorRecruitError", self->id).error(e);
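The ID comparison at the end of the recruitment above reduces to a small decision: halt a stale published distributor and publish the newly recruited one only when the IDs differ. A self-contained sketch with made-up IDs:

#include <iostream>
#include <optional>

struct DDInterfLike { int id; }; // stand-in for DataDistributorInterface

int main() {
	std::optional<DDInterfLike> published{ DDInterfLike{ 7 } }; // hypothetical current interface
	DDInterfLike recruited{ 9 }; // hypothetical newly recruited interface
	bool haltOld = published.has_value() && published->id != recruited.id;
	bool publishNew = !published.has_value() || published->id != recruited.id;
	std::cout << haltOld << publishNew << "\n"; // 11: halt the old one, publish the new one
}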
@@ -4589,17 +4758,18 @@ ACTOR Future<Void> monitorDataDistributor(ClusterControllerData* self) {
}

loop {
if (self->db.serverInfo->get().distributor.present()) {
wait(waitFailureClient(self->db.serverInfo->get().distributor.get().waitFailure,
SERVER_KNOBS->DD_FAILURE_TIME));
TraceEvent("CCDataDistributorDied", self->id)
.detail("DistributorId", self->db.serverInfo->get().distributor.get().id());
self->db.clearInterf(ProcessClass::DataDistributorClass);
if (self->db.serverInfo->get().distributor.present() && !self->recruitDistributor.get()) {
choose {
when(wait(waitFailureClient(self->db.serverInfo->get().distributor.get().waitFailure,
SERVER_KNOBS->DD_FAILURE_TIME))) {
TraceEvent("CCDataDistributorDied", self->id)
.detail("DDID", self->db.serverInfo->get().distributor.get().id());
self->db.clearInterf(ProcessClass::DataDistributorClass);
}
when(wait(self->recruitDistributor.onChange())) {}
}
} else {
self->recruitingDistributor = true;
DataDistributorInterface distributorInterf = wait(startDataDistributor(self));
self->recruitingDistributor = false;
self->db.setDistributor(distributorInterf);
wait(startDataDistributor(self));
}
}
}
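The reshaped loop above races the singleton's failure future against the recruit signal, so a better-placement decision no longer has to wait for the current distributor to die. Schematically, the pattern looks like this flow sketch (illustrative names; it needs FDB's actor compiler and flow.h, so it is not standalone C++):

ACTOR Future<Void> monitorSingletonLike(Future<Void> failed, AsyncVar<bool>* recruitSignal) {
	choose {
		when(wait(failed)) {
			// the running singleton died; the caller clears the interface and re-recruits
		}
		when(wait(recruitSignal->onChange())) {
			// a better process was found; the caller re-recruits immediately
		}
	}
	return Void();
}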
@@ -4652,8 +4822,7 @@ ACTOR Future<Void> startRatekeeper(ClusterControllerData* self) {
TraceEvent("CCHaltRatekeeperAfterRecruit", self->id)
.detail("RKID", ratekeeper.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
self->id_worker[ratekeeper.get().locality.processId()].haltRatekeeper =
brokenPromiseToNever(ratekeeper.get().haltRatekeeper.getReply(HaltRatekeeperRequest(self->id)));
RatekeeperSingleton(ratekeeper).halt(self, ratekeeper.get().locality.processId());
}
if (!ratekeeper.present() || ratekeeper.get().id() != interf.get().id()) {
self->db.setRatekeeper(interf.get());
@@ -4866,7 +5035,7 @@ ACTOR Future<Void> clusterControllerCore(ClusterControllerFullInterface interf,
}
when(GetWorkersRequest req = waitNext(interf.getWorkers.getFuture())) {
++self.getWorkersRequests;
vector<WorkerDetails> workers;
std::vector<WorkerDetails> workers;

for (auto const& [id, worker] : self.id_worker) {
if ((req.flags & GetWorkersRequest::NON_EXCLUDED_PROCESSES_ONLY) &&

@@ -4886,7 +5055,7 @@ ACTOR Future<Void> clusterControllerCore(ClusterControllerFullInterface interf,
}
when(GetClientWorkersRequest req = waitNext(interf.clientInterface.getClientWorkers.getFuture())) {
++self.getClientWorkersRequests;
vector<ClientWorkerInterface> workers;
std::vector<ClientWorkerInterface> workers;
for (auto& it : self.id_worker) {
if (it.second.details.processClass.classType() != ProcessClass::TesterClass) {
workers.push_back(it.second.details.interf.clientInterface);
@@ -87,9 +87,9 @@ ACTOR void discardCommit(UID id, Future<LogSystemDiskQueueAdapter::CommitMessage

struct ResolutionRequestBuilder {
ProxyCommitData* self;
vector<ResolveTransactionBatchRequest> requests;
vector<vector<int>> transactionResolverMap;
vector<CommitTransactionRef*> outTr;
std::vector<ResolveTransactionBatchRequest> requests;
std::vector<std::vector<int>> transactionResolverMap;
std::vector<CommitTransactionRef*> outTr;
std::vector<std::vector<std::vector<int>>>
txReadConflictRangeIndexMap; // Used to report conflicting keys, the format is
// [CommitTransactionRef_Index][Resolver_Index][Read_Conflict_Range_Index_on_Resolver]

@@ -186,7 +186,7 @@ struct ResolutionRequestBuilder {
requests[r].txnStateTransactions.push_back(requests[r].arena, transactionNumberInRequest);
}

vector<int> resolversUsed;
std::vector<int> resolversUsed;
for (int r = 0; r < outTr.size(); r++)
if (outTr[r]) {
resolversUsed.push_back(r);

@@ -277,7 +277,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData* commitData,
}
}

void createWhitelistBinPathVec(const std::string& binPath, vector<Standalone<StringRef>>& binPathVec) {
void createWhitelistBinPathVec(const std::string& binPath, std::vector<Standalone<StringRef>>& binPathVec) {
TraceEvent(SevDebug, "BinPathConverter").detail("Input", binPath);
StringRef input(binPath);
while (input != StringRef()) {

@@ -297,7 +297,7 @@ void createWhitelistBinPathVec(const std::string& binPath, vector<Standalone<Str
return;
}

bool isWhitelisted(const vector<Standalone<StringRef>>& binPathVec, StringRef binPath) {
bool isWhitelisted(const std::vector<Standalone<StringRef>>& binPathVec, StringRef binPath) {
TraceEvent("BinPath").detail("Value", binPath);
for (const auto& item : binPathVec) {
TraceEvent("Element").detail("Value", item);

@@ -1407,7 +1407,7 @@ ACTOR Future<Void> reply(CommitBatchContext* self) {

// Commit one batch of transactions trs
ACTOR Future<Void> commitBatch(ProxyCommitData* self,
vector<CommitTransactionRequest>* trs,
std::vector<CommitTransactionRequest>* trs,
int currentBatchMemBytesCount) {
// WARNING: this code is run at a high priority (until the first delay(0)), so it needs to do as little work as
// possible

@@ -1473,7 +1473,7 @@ ACTOR static Future<Void> doKeyServerLocationRequest(GetKeyServerLocationsReques
if (!req.end.present()) {
auto r = req.reverse ? commitData->keyInfo.rangeContainingKeyBefore(req.begin)
: commitData->keyInfo.rangeContaining(req.begin);
vector<StorageServerInterface> ssis;
std::vector<StorageServerInterface> ssis;
ssis.reserve(r.value().src_info.size());
for (auto& it : r.value().src_info) {
ssis.push_back(it->interf);

@@ -1485,7 +1485,7 @@ ACTOR static Future<Void> doKeyServerLocationRequest(GetKeyServerLocationsReques
for (auto r = commitData->keyInfo.rangeContaining(req.begin);
r != commitData->keyInfo.ranges().end() && count < req.limit && r.begin() < req.end.get();
++r) {
vector<StorageServerInterface> ssis;
std::vector<StorageServerInterface> ssis;
ssis.reserve(r.value().src_info.size());
for (auto& it : r.value().src_info) {
ssis.push_back(it->interf);

@@ -1498,7 +1498,7 @@ ACTOR static Future<Void> doKeyServerLocationRequest(GetKeyServerLocationsReques
int count = 0;
auto r = commitData->keyInfo.rangeContainingKeyBefore(req.end.get());
while (count < req.limit && req.begin < r.end()) {
vector<StorageServerInterface> ssis;
std::vector<StorageServerInterface> ssis;
ssis.reserve(r.value().src_info.size());
for (auto& it : r.value().src_info) {
ssis.push_back(it->interf);

@@ -2008,7 +2008,7 @@ ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy,
proxy.id(), master, proxy.getConsistentReadVersion, recoveryTransactionVersion, proxy.commit, db, firstProxy);

state Future<Sequence> sequenceFuture = (Sequence)0;
state PromiseStream<std::pair<vector<CommitTransactionRequest>, int>> batchedCommits;
state PromiseStream<std::pair<std::vector<CommitTransactionRequest>, int>> batchedCommits;
state Future<Void> commitBatcherActor;
state Future<Void> lastCommitComplete = Void();

@@ -2100,9 +2100,10 @@ ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy,
commitData.updateLatencyBandConfig(commitData.db->get().latencyBandConfig);
}
when(wait(onError)) {}
when(std::pair<vector<CommitTransactionRequest>, int> batchedRequests = waitNext(batchedCommits.getFuture())) {
when(std::pair<std::vector<CommitTransactionRequest>, int> batchedRequests =
waitNext(batchedCommits.getFuture())) {
// WARNING: this code is run at a high priority, so it needs to do as little work as possible
const vector<CommitTransactionRequest>& trs = batchedRequests.first;
const std::vector<CommitTransactionRequest>& trs = batchedRequests.first;
int batchBytes = batchedRequests.second;
//TraceEvent("CommitProxyCTR", proxy.id()).detail("CommitTransactions", trs.size()).detail("TransactionRate", transactionRate).detail("TransactionQueue", transactionQueue.size()).detail("ReleasedTransactionCount", transactionCount);
if (trs.size() || (commitData.db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS &&
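The repeated vector -> std::vector edits in the hunks above qualify the container explicitly, which keeps each translation unit from depending on a using-declaration being in scope (presumably a file-scope using std::vector; was dropped). A minimal standard C++ illustration:

#include <vector>

// With no 'using std::vector;' in scope, the unqualified name does not compile:
// vector<int> broken; // error: unknown type name 'vector'
std::vector<int> works{ 1, 2, 3 }; // the qualified name needs no using-declaration

int main() {
	return works.size() == 3 ? 0 : 1;
}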
@@ -34,10 +34,10 @@ ConfigFollowerInterface::ConfigFollowerInterface() : _id(deterministicRandom()->

ConfigFollowerInterface::ConfigFollowerInterface(NetworkAddress const& remote)
: _id(deterministicRandom()->randomUniqueID()),
getSnapshotAndChanges(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES)),
getChanges(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_GETCHANGES)),
compact(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_COMPACT)),
getCommittedVersion(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION)) {}
getSnapshotAndChanges(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES)),
getChanges(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGFOLLOWER_GETCHANGES)),
compact(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGFOLLOWER_COMPACT)),
getCommittedVersion(Endpoint::wellKnown({ remote }, WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION)) {}

bool ConfigFollowerInterface::operator==(ConfigFollowerInterface const& rhs) const {
return _id == rhs._id;

@@ -147,8 +147,8 @@ struct CoordinatedStateImpl {
ACTOR static Future<GenerationRegReadReply> replicatedRead(CoordinatedStateImpl* self,
GenerationRegReadRequest req) {
state std::vector<GenerationRegInterface>& replicas = self->coordinators.stateServers;
state vector<Future<GenerationRegReadReply>> rep_empty_reply;
state vector<Future<GenerationRegReadReply>> rep_reply;
state std::vector<Future<GenerationRegReadReply>> rep_empty_reply;
state std::vector<Future<GenerationRegReadReply>> rep_reply;
for (int i = 0; i < replicas.size(); i++) {
Future<GenerationRegReadReply> reply =
waitAndSendRead(replicas[i].read, GenerationRegReadRequest(req.key, req.gen));

@@ -189,7 +189,7 @@ struct CoordinatedStateImpl {

ACTOR static Future<UniqueGeneration> replicatedWrite(CoordinatedStateImpl* self, GenerationRegWriteRequest req) {
state std::vector<GenerationRegInterface>& replicas = self->coordinators.stateServers;
state vector<Future<UniqueGeneration>> wrep_reply;
state std::vector<Future<UniqueGeneration>> wrep_reply;
for (int i = 0; i < replicas.size(); i++) {
Future<UniqueGeneration> reply =
waitAndSendWrite(replicas[i].write, GenerationRegWriteRequest(req.kv, req.gen));
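These hunks switch well-known endpoints from a bare Endpoint constructor to an Endpoint::wellKnown factory. The general shape of that refactor, sketched with toy types (not the real Endpoint API): a named factory documents intent at every call site and gives one audited place to apply well-known-endpoint invariants.

#include <cstdint>

struct TokenLike { int64_t first, second; }; // stand-in for a well-known token UID

struct EndpointLike {
	TokenLike token;
	static EndpointLike wellKnown(TokenLike t) { return EndpointLike{ t }; }
};

int main() {
	EndpointLike e = EndpointLike::wellKnown(TokenLike{ -1, 4 });
	return e.token.second == 4 ? 0 : 1;
}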
@@ -74,7 +74,8 @@ struct GenerationRegVal {
};

GenerationRegInterface::GenerationRegInterface(NetworkAddress remote)
: read(Endpoint({ remote }, WLTOKEN_GENERATIONREG_READ)), write(Endpoint({ remote }, WLTOKEN_GENERATIONREG_WRITE)) {}
: read(Endpoint::wellKnown({ remote }, WLTOKEN_GENERATIONREG_READ)),
write(Endpoint::wellKnown({ remote }, WLTOKEN_GENERATIONREG_WRITE)) {}

GenerationRegInterface::GenerationRegInterface(INetwork* local) {
read.makeWellKnownEndpoint(WLTOKEN_GENERATIONREG_READ, TaskPriority::Coordination);

@@ -82,10 +83,10 @@ GenerationRegInterface::GenerationRegInterface(INetwork* local) {
}

LeaderElectionRegInterface::LeaderElectionRegInterface(NetworkAddress remote)
: ClientLeaderRegInterface(remote), candidacy(Endpoint({ remote }, WLTOKEN_LEADERELECTIONREG_CANDIDACY)),
electionResult(Endpoint({ remote }, WLTOKEN_LEADERELECTIONREG_ELECTIONRESULT)),
leaderHeartbeat(Endpoint({ remote }, WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT)),
forward(Endpoint({ remote }, WLTOKEN_LEADERELECTIONREG_FORWARD)) {}
: ClientLeaderRegInterface(remote), candidacy(Endpoint::wellKnown({ remote }, WLTOKEN_LEADERELECTIONREG_CANDIDACY)),
electionResult(Endpoint::wellKnown({ remote }, WLTOKEN_LEADERELECTIONREG_ELECTIONRESULT)),
leaderHeartbeat(Endpoint::wellKnown({ remote }, WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT)),
forward(Endpoint::wellKnown({ remote }, WLTOKEN_LEADERELECTIONREG_FORWARD)) {}

LeaderElectionRegInterface::LeaderElectionRegInterface(INetwork* local) : ClientLeaderRegInterface(local) {
candidacy.makeWellKnownEndpoint(WLTOKEN_LEADERELECTIONREG_CANDIDACY, TaskPriority::Coordination);

@@ -299,8 +300,8 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
req.reply.send(clientData.clientInfo->get());
} else {
if (!leaderMon.isValid()) {
leaderMon =
monitorLeaderForProxies(req.clusterKey, req.coordinators, &clientData, currentElectedLeader);
leaderMon = monitorLeaderAndGetClientInfo(
req.clusterKey, req.coordinators, &clientData, currentElectedLeader);
}
actors.add(
openDatabase(&clientData, &clientCount, hasConnectedClients, req, canConnectToLeader.checkStuck()));

@@ -312,7 +313,8 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
req.reply.send(currentElectedLeader->get());
} else {
if (!leaderMon.isValid()) {
leaderMon = monitorLeaderForProxies(req.key, req.coordinators, &clientData, currentElectedLeader);
leaderMon =
monitorLeaderAndGetClientInfo(req.key, req.coordinators, &clientData, currentElectedLeader);
}
actors.add(remoteMonitorLeader(&clientCount, hasConnectedClients, currentElectedLeader, req));
}
@@ -23,20 +23,9 @@
#pragma once

#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/WellKnownEndpoints.h"
#include "fdbserver/ConfigFollowerInterface.h"

constexpr UID WLTOKEN_LEADERELECTIONREG_CANDIDACY(-1, 4);
constexpr UID WLTOKEN_LEADERELECTIONREG_ELECTIONRESULT(-1, 5);
constexpr UID WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT(-1, 6);
constexpr UID WLTOKEN_LEADERELECTIONREG_FORWARD(-1, 7);
constexpr UID WLTOKEN_GENERATIONREG_READ(-1, 8);
constexpr UID WLTOKEN_GENERATIONREG_WRITE(-1, 9);

constexpr UID WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES(-1, 17);
constexpr UID WLTOKEN_CONFIGFOLLOWER_GETCHANGES(-1, 18);
constexpr UID WLTOKEN_CONFIGFOLLOWER_COMPACT(-1, 19);
constexpr UID WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION(-1, 20);

struct GenerationRegInterface {
constexpr static FileIdentifier file_identifier = 16726744;
RequestStream<struct GenerationRegReadRequest> read;

@@ -160,7 +149,7 @@ struct CandidacyRequest {
struct ElectionResultRequest {
constexpr static FileIdentifier file_identifier = 11815465;
Key key;
vector<NetworkAddress> coordinators;
std::vector<NetworkAddress> coordinators;
UID knownLeader;
ReplyPromise<Optional<LeaderInfo>> reply;

@@ -144,8 +144,8 @@ struct DBCoreState {

DBCoreState() : logRouterTags(0), txsTags(0), recoveryCount(0), logSystemType(LogSystemType::empty) {}

vector<UID> getPriorCommittedLogServers() {
vector<UID> priorCommittedLogServers;
std::vector<UID> getPriorCommittedLogServers() {
std::vector<UID> priorCommittedLogServers;
for (auto& it : tLogs) {
for (auto& log : it.tLogs) {
priorCommittedLogServers.push_back(log);
@@ -67,7 +67,7 @@ FDB_BOOLEAN_PARAM(IsPrimary);
ACTOR Future<Void> checkAndRemoveInvalidLocalityAddr(DDTeamCollection* self);
ACTOR Future<Void> removeWrongStoreType(DDTeamCollection* self);
ACTOR Future<Void> waitForAllDataRemoved(Database cx, UID serverID, Version addedVersion, DDTeamCollection* teams);
bool _exclusionSafetyCheck(vector<UID>& excludeServerIDs, DDTeamCollection* teamCollection);
bool _exclusionSafetyCheck(std::vector<UID>& excludeServerIDs, DDTeamCollection* teamCollection);

struct TCServerInfo : public ReferenceCounted<TCServerInfo> {
UID id;

@@ -75,7 +75,7 @@ struct TCServerInfo : public ReferenceCounted<TCServerInfo> {
DDTeamCollection* collection;
StorageServerInterface lastKnownInterface;
ProcessClass lastKnownClass;
vector<Reference<TCTeamInfo>> teams;
std::vector<Reference<TCTeamInfo>> teams;
Reference<TCMachineInfo> machine;
Future<Void> tracker;
int64_t dataInFlightToServer;

@@ -165,12 +165,12 @@ ACTOR Future<Void> updateServerMetrics(Reference<TCServerInfo> server);
// TeamCollection's machine team information
class TCMachineTeamInfo : public ReferenceCounted<TCMachineTeamInfo> {
public:
vector<Reference<TCMachineInfo>> machines;
vector<Standalone<StringRef>> machineIDs;
vector<Reference<TCTeamInfo>> serverTeams;
std::vector<Reference<TCMachineInfo>> machines;
std::vector<Standalone<StringRef>> machineIDs;
std::vector<Reference<TCTeamInfo>> serverTeams;
UID id;

explicit TCMachineTeamInfo(vector<Reference<TCMachineInfo>> const& machines)
explicit TCMachineTeamInfo(std::vector<Reference<TCMachineInfo>> const& machines)
: machines(machines), id(deterministicRandom()->randomUniqueID()) {
machineIDs.reserve(machines.size());
for (int i = 0; i < machines.size(); i++) {

@@ -202,8 +202,8 @@ public:

// TeamCollection's server team info.
class TCTeamInfo final : public ReferenceCounted<TCTeamInfo>, public IDataDistributionTeam {
vector<Reference<TCServerInfo>> servers;
vector<UID> serverIDs;
std::vector<Reference<TCServerInfo>> servers;
std::vector<UID> serverIDs;
bool healthy;
bool wrongConfiguration; // True if any of the servers in the team have the wrong configuration
int priority;

@@ -213,7 +213,7 @@ public:
Reference<TCMachineTeamInfo> machineTeam;
Future<Void> tracker;

explicit TCTeamInfo(vector<Reference<TCServerInfo>> const& servers)
explicit TCTeamInfo(std::vector<Reference<TCServerInfo>> const& servers)
: servers(servers), healthy(true), wrongConfiguration(false), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY),
id(deterministicRandom()->randomUniqueID()) {
if (servers.empty()) {

@@ -227,8 +227,8 @@ public:

std::string getTeamID() const override { return id.shortString(); }

vector<StorageServerInterface> getLastKnownServerInterfaces() const override {
vector<StorageServerInterface> v;
std::vector<StorageServerInterface> getLastKnownServerInterfaces() const override {
std::vector<StorageServerInterface> v;
v.reserve(servers.size());
for (const auto& server : servers) {
v.push_back(server->lastKnownInterface);

@@ -239,8 +239,8 @@ public:
ASSERT(servers.size() == serverIDs.size());
return servers.size();
}
vector<UID> const& getServerIDs() const override { return serverIDs; }
const vector<Reference<TCServerInfo>>& getServers() const { return servers; }
std::vector<UID> const& getServerIDs() const override { return serverIDs; }
const std::vector<Reference<TCServerInfo>>& getServers() const { return servers; }

std::string getServerIDsStr() const {
std::stringstream ss;

@@ -361,7 +361,8 @@ public:
return std::find(serverIDs.begin(), serverIDs.end(), server) != serverIDs.end();
}

void addServers(const vector<UID>& servers) override {
void addServers(const std::vector<UID>& servers) override {

serverIDs.reserve(servers.size());
for (int i = 0; i < servers.size(); i++) {
serverIDs.push_back(servers[i]);

@@ -442,7 +443,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
state Transaction tr(cx);

state std::map<UID, Optional<Key>> server_dc;
state std::map<vector<UID>, std::pair<vector<UID>, vector<UID>>> team_cache;
state std::map<std::vector<UID>, std::pair<std::vector<UID>, std::vector<UID>>> team_cache;
state std::vector<std::pair<StorageServerInterface, ProcessClass>> tss_servers;

// Get the server list in its own try/catch block since it modifies result. We don't want a subsequent failure

@@ -480,7 +481,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
return result;
}

state Future<vector<ProcessData>> workers = getWorkers(&tr);
state Future<std::vector<ProcessData>> workers = getWorkers(&tr);
state Future<RangeResult> serverList = tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY);
wait(success(workers) && success(serverList));
ASSERT(!serverList.get().more && serverList.get().size() < CLIENT_KNOBS->TOO_MANY);

@@ -528,7 +529,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT_BYTES));
succeeded = true;

vector<UID> src, dest, last;
std::vector<UID> src, dest, last;

// for each range
for (int i = 0; i < keyServers.size() - 1; i++) {

@@ -577,7 +578,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
auto srcIter = team_cache.find(src);
if (srcIter == team_cache.end()) {
result->primaryTeams.insert(src);
team_cache[src] = std::pair<vector<UID>, vector<UID>>();
team_cache[src] = std::pair<std::vector<UID>, std::vector<UID>>();
}
if (dest.size()) {
info.hasDest = true;

@@ -585,7 +586,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
auto destIter = team_cache.find(dest);
if (destIter == team_cache.end()) {
result->primaryTeams.insert(dest);
team_cache[dest] = std::pair<vector<UID>, vector<UID>>();
team_cache[dest] = std::pair<std::vector<UID>, std::vector<UID>>();
}
}
}
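One detail worth noting in the team_cache hunks above: std::vector defines lexicographic operator<, so a server-ID vector can key a std::map directly with no custom comparator. A minimal check:

#include <iostream>
#include <map>
#include <utility>
#include <vector>

int main() {
	std::map<std::vector<int>, std::pair<std::vector<int>, std::vector<int>>> team_cache;
	team_cache[{ 1, 2, 3 }] = {}; // vector keys compare lexicographically
	std::cout << team_cache.count({ 1, 2, 3 }) << "\n"; // 1
}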
@ -648,7 +649,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
|
||||
MoveKeysLock lock;
|
||||
PromiseStream<RelocateShard> output;
|
||||
vector<UID> allServers;
|
||||
std::vector<UID> allServers;
|
||||
ServerStatusMap server_status;
|
||||
int64_t unhealthyServers;
|
||||
std::map<int,int> priority_teams;
|
||||
|
@ -667,8 +668,8 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
std::vector<Reference<TCMachineTeamInfo>> machineTeams; // all machine teams
|
||||
LocalityMap<UID> machineLocalityMap; // locality info of machines
|
||||
|
||||
vector<Reference<TCTeamInfo>> teams;
|
||||
vector<Reference<TCTeamInfo>> badTeams;
|
||||
std::vector<Reference<TCTeamInfo>> teams;
|
||||
std::vector<Reference<TCTeamInfo>> badTeams;
|
||||
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure;
|
||||
PromiseStream<UID> removedServers;
|
||||
PromiseStream<UID> removedTSS;
|
||||
|
@ -1321,7 +1322,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
.detail("MachineMaxTeams", maxMachineTeams);
|
||||
}
|
||||
|
||||
int overlappingMembers(const vector<UID>& team) const {
|
||||
int overlappingMembers(const std::vector<UID>& team) const {
|
||||
if (team.empty()) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -1357,7 +1358,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
return maxMatchingServers;
|
||||
}
|
||||
|
||||
int overlappingMachineMembers(vector<Standalone<StringRef>> const& team) const {
|
||||
int overlappingMachineMembers(std::vector<Standalone<StringRef>> const& team) const {
|
||||
if (team.empty()) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -1392,7 +1393,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
return maxMatchingServers;
|
||||
}
|
||||
|
||||
Reference<TCMachineTeamInfo> findMachineTeam(vector<Standalone<StringRef>> const& machineIDs) const {
|
||||
Reference<TCMachineTeamInfo> findMachineTeam(std::vector<Standalone<StringRef>> const& machineIDs) const {
|
||||
if (machineIDs.empty()) {
|
||||
return Reference<TCMachineTeamInfo>();
|
||||
}
|
||||
|
@ -1417,7 +1418,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
// when the team added at init() when we recreate teams by looking up DB
|
||||
template <class InputIt>
|
||||
void addTeam(InputIt begin, InputIt end, bool isInitialTeam) {
|
||||
vector<Reference<TCServerInfo>> newTeamServers;
|
||||
std::vector<Reference<TCServerInfo>> newTeamServers;
|
||||
for (auto i = begin; i != end; ++i) {
|
||||
if (server_info.find(*i) != server_info.end()) {
|
||||
newTeamServers.push_back(server_info[*i]);
|
||||
|
@ -1427,7 +1428,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
addTeam(newTeamServers, isInitialTeam);
|
||||
}
|
||||
|
||||
void addTeam(const vector<Reference<TCServerInfo>>& newTeamServers,
|
||||
void addTeam(const std::vector<Reference<TCServerInfo>>& newTeamServers,
|
||||
bool isInitialTeam,
|
||||
bool redundantTeam = false) {
|
||||
auto teamInfo = makeReference<TCTeamInfo>(newTeamServers);
|
||||
|
@ -1451,7 +1452,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
|
||||
// Find or create machine team for the server team
|
||||
// Add the reference of machineTeam (with machineIDs) into process team
|
||||
vector<Standalone<StringRef>> machineIDs;
|
||||
std::vector<Standalone<StringRef>> machineIDs;
|
||||
for (auto server = newTeamServers.begin(); server != newTeamServers.end(); ++server) {
|
||||
ASSERT_WE_THINK((*server)->machine.isValid());
|
||||
machineIDs.push_back((*server)->machine->machineID);
|
||||
|
@ -1486,7 +1487,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
void addTeam(std::set<UID> const& team, bool isInitialTeam) { addTeam(team.begin(), team.end(), isInitialTeam); }
|
||||
|
||||
// Add a machine team specified by input machines
|
||||
Reference<TCMachineTeamInfo> addMachineTeam(vector<Reference<TCMachineInfo>> machines) {
|
||||
Reference<TCMachineTeamInfo> addMachineTeam(std::vector<Reference<TCMachineInfo>> machines) {
|
||||
auto machineTeamInfo = makeReference<TCMachineTeamInfo>(machines);
|
||||
machineTeams.push_back(machineTeamInfo);
|
||||
|
||||
|
@ -1502,9 +1503,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
}
|
||||
|
||||
// Add a machine team by using the machineIDs from begin to end
|
||||
Reference<TCMachineTeamInfo> addMachineTeam(vector<Standalone<StringRef>>::iterator begin,
|
||||
vector<Standalone<StringRef>>::iterator end) {
|
||||
vector<Reference<TCMachineInfo>> machines;
|
||||
Reference<TCMachineTeamInfo> addMachineTeam(std::vector<Standalone<StringRef>>::iterator begin,
|
||||
std::vector<Standalone<StringRef>>::iterator end) {
|
||||
std::vector<Reference<TCMachineInfo>> machines;
|
||||
|
||||
for (auto i = begin; i != end; ++i) {
|
||||
if (machine_info.find(*i) != machine_info.end()) {
|
||||
|
@ -1790,7 +1791,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
ASSERT(team.size() == configuration.storageTeamSize);

int score = 0;
vector<Standalone<StringRef>> machineIDs;
std::vector<Standalone<StringRef>> machineIDs;
for (auto process = team.begin(); process != team.end(); process++) {
Reference<TCServerInfo> server = server_info[**process];
score += server->machine->machineTeams.size();

@ -1822,7 +1823,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
if (bestTeam.size() == configuration.storageTeamSize) {
// machineIDs is used to quickly check if the machineIDs belong to an existing team
// machines keeps machine references for a performance benefit, avoiding looking up each machine by machineID
vector<Reference<TCMachineInfo>> machines;
std::vector<Reference<TCMachineInfo>> machines;
for (auto process = bestTeam.begin(); process < bestTeam.end(); process++) {
Reference<TCMachineInfo> machine = server_info[**process]->machine;
machines.push_back(machine);

@ -1843,7 +1844,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
return addedMachineTeams;
}

bool isMachineTeamHealthy(vector<Standalone<StringRef>> const& machineIDs) const {
bool isMachineTeamHealthy(std::vector<Standalone<StringRef>> const& machineIDs) const {
int healthyNum = 0;

// A healthy machine team should have the desired number of machines

@ -1892,7 +1893,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {

// Return the healthy server with the least number of correct-size server teams
Reference<TCServerInfo> findOneLeastUsedServer() const {
vector<Reference<TCServerInfo>> leastUsedServers;
std::vector<Reference<TCServerInfo>> leastUsedServers;
int minTeams = std::numeric_limits<int>::max();
for (auto& server : server_info) {
// Only pick healthy servers, i.e. those that are not failed or excluded.
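findOneLeastUsedServer is a single pass that tracks the running minimum team count and collects ties, then picks among them at random so load spreads instead of always hitting the same server. A standalone sketch of that pattern (types simplified, not the FDB classes):

    #include <limits>
    #include <map>
    #include <vector>

    struct ServerInfo { int teamCount; bool healthy; };

    // Return the ids tied for the fewest teams; the caller picks one at random.
    std::vector<int> leastUsedServers(const std::map<int, ServerInfo>& servers) {
        std::vector<int> best;
        int minTeams = std::numeric_limits<int>::max();
        for (const auto& [id, info] : servers) {
            if (!info.healthy)
                continue; // mirror the "not failed or excluded" filter
            if (info.teamCount < minTeams) {
                minTeams = info.teamCount;
                best.clear();
            }
            if (info.teamCount == minTeams)
                best.push_back(id);
        }
        return best;
    }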
@ -2233,7 +2234,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {

// From here, chosenMachineTeam must have a healthy server team
// Step 3: Randomly pick 1 server from each machine in the chosen machine team to form a server team
vector<UID> serverTeam;
std::vector<UID> serverTeam;
int chosenServerCount = 0;
for (auto& machine : chosenMachineTeam->machines) {
UID serverID;

@ -2435,7 +2436,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {

self->lastBuildTeamsFailed = false;
if (teamsToBuild > 0 || self->notEnoughTeamsForAServer()) {
state vector<std::vector<UID>> builtTeams;
state std::vector<std::vector<UID>> builtTeams;

// addTeamsBestOf() will not add more teams than needed.
// If the team number is more than the desired, the extra teams are added in the code path when

@ -3043,7 +3044,7 @@ ACTOR Future<Void> printSnapshotTeamsInfo(Reference<DDTeamCollection> self) {
state DatabaseConfiguration configuration;
state std::map<UID, Reference<TCServerInfo>> server_info;
state std::map<UID, ServerStatus> server_status;
state vector<Reference<TCTeamInfo>> teams;
state std::vector<Reference<TCTeamInfo>> teams;
state std::map<Standalone<StringRef>, Reference<TCMachineInfo>> machine_info;
state std::vector<Reference<TCMachineTeamInfo>> machineTeams;
// state std::vector<std::string> internedLocalityRecordKeyNameStrings;

@ -3486,7 +3487,7 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {

ACTOR Future<Void> zeroServerLeftLogger_impl(DDTeamCollection* self, Reference<TCTeamInfo> team) {
wait(delay(SERVER_KNOBS->DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY));
state vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor(
state std::vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor(
ShardsAffectedByTeamFailure::Team(team->getServerIDs(), self->primary));
state std::vector<Future<StorageMetrics>> sizes;
sizes.reserve(shards.size());

@ -3570,7 +3571,7 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
}

// Check if the number of degraded machines has changed
state vector<Future<Void>> change;
state std::vector<Future<Void>> change;
bool anyUndesired = false;
bool anyWrongConfiguration = false;
bool anyWigglingServer = false;

@ -3758,7 +3759,7 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
self->zeroHealthyTeams->get(); // set this again in case it changed from this team's health changing
if ((self->initialFailureReactionDelay.isReady() && !self->zeroHealthyTeams->get()) || containsFailed) {

vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor(
std::vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor(
ShardsAffectedByTeamFailure::Team(team->getServerIDs(), self->primary));

for (int i = 0; i < shards.size(); i++) {

@ -3768,8 +3769,8 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
// The shard split/merge and DD rebooting may make a shard mapped to multiple teams,
// so we need to recalculate the shard's priority
if (maxPriority < SERVER_KNOBS->PRIORITY_TEAM_FAILED) {
std::pair<vector<ShardsAffectedByTeamFailure::Team>,
vector<ShardsAffectedByTeamFailure::Team>>
std::pair<std::vector<ShardsAffectedByTeamFailure::Team>,
std::vector<ShardsAffectedByTeamFailure::Team>>
teams = self->shardsAffectedByTeamFailure->getTeamsFor(shards[i]);
for (int j = 0; j < teams.first.size() + teams.second.size(); j++) {
// t is the team in primary DC or the remote DC

@ -3970,8 +3971,9 @@ ACTOR Future<Void> trackExcludedServers(DDTeamCollection* self) {
}
}

ACTOR Future<vector<std::pair<StorageServerInterface, ProcessClass>>> getServerListAndProcessClasses(Transaction* tr) {
state Future<vector<ProcessData>> workers = getWorkers(tr);
ACTOR Future<std::vector<std::pair<StorageServerInterface, ProcessClass>>> getServerListAndProcessClasses(
Transaction* tr) {
state Future<std::vector<ProcessData>> workers = getWorkers(tr);
state Future<RangeResult> serverList = tr->getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY);
wait(success(workers) && success(serverList));
ASSERT(!serverList.get().more && serverList.get().size() < CLIENT_KNOBS->TOO_MANY);

@ -3980,7 +3982,7 @@ ACTOR Future<vector<std::pair<StorageServerInterface, ProcessClass>>> getServerL
for (int i = 0; i < workers.get().size(); i++)
id_data[workers.get()[i].locality.processId()] = workers.get()[i];

vector<std::pair<StorageServerInterface, ProcessClass>> results;
std::vector<std::pair<StorageServerInterface, ProcessClass>> results;
for (int i = 0; i < serverList.get().size(); i++) {
auto ssi = decodeServerListValue(serverList.get()[i].value);
results.emplace_back(ssi, id_data[ssi.locality.processId()].processClass);
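getServerListAndProcessClasses pairs each storage server with its process class by first indexing workers by processId and then probing that index per server — a hash-join rather than a nested scan. A self-contained sketch of that join (plain types standing in for the FDB ones):

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct Worker { std::string processId; std::string processClass; };
    struct ServerInterface { std::string id; std::string processId; };

    // Index workers once, then resolve each server's class with a map lookup.
    std::vector<std::pair<ServerInterface, std::string>> joinServersToClasses(
        const std::vector<Worker>& workers, const std::vector<ServerInterface>& servers) {
        std::map<std::string, Worker> byProcess;
        for (const auto& w : workers)
            byProcess[w.processId] = w;

        std::vector<std::pair<ServerInterface, std::string>> results;
        results.reserve(servers.size());
        for (const auto& s : servers)
            results.emplace_back(s, byProcess[s.processId].processClass);
        return results;
    }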
@ -4246,7 +4248,7 @@ ACTOR Future<Void> waitServerListChange(DDTeamCollection* self,
FutureStream<Void> serverRemoved,
const DDEnabledState* ddEnabledState) {
state Future<Void> checkSignal = delay(SERVER_KNOBS->SERVER_LIST_DELAY, TaskPriority::DataDistributionLaunch);
state Future<vector<std::pair<StorageServerInterface, ProcessClass>>> serverListAndProcessClasses = Never();
state Future<std::vector<std::pair<StorageServerInterface, ProcessClass>>> serverListAndProcessClasses = Never();
state bool isFetchingResults = false;
state Transaction tr(self->cx);
loop {

@ -4257,7 +4259,7 @@ ACTOR Future<Void> waitServerListChange(DDTeamCollection* self,
isFetchingResults = true;
serverListAndProcessClasses = getServerListAndProcessClasses(&tr);
}
when(vector<std::pair<StorageServerInterface, ProcessClass>> results =
when(std::vector<std::pair<StorageServerInterface, ProcessClass>> results =
wait(serverListAndProcessClasses)) {
serverListAndProcessClasses = Never();
isFetchingResults = false;

@ -4819,7 +4821,7 @@ ACTOR Future<Void> storageServerTracker(
}
// Ensure the server's server team belongs to a machine team, and
// Get the newBadTeams due to the locality change
vector<Reference<TCTeamInfo>> newBadTeams;
std::vector<Reference<TCTeamInfo>> newBadTeams;
for (auto& serverTeam : server->teams) {
if (!self->satisfiesPolicy(serverTeam->getServers())) {
newBadTeams.push_back(serverTeam);

@ -4969,7 +4971,7 @@ ACTOR Future<Void> checkAndRemoveInvalidLocalityAddr(DDTeamCollection* self) {

// Because worker's processId can be changed when its locality is changed, we cannot watch on the old
// processId; this actor is inactive most of the time, so iterating all workers incurs little performance overhead.
state vector<ProcessData> workers = wait(getWorkers(self->cx));
state std::vector<ProcessData> workers = wait(getWorkers(self->cx));
state std::set<AddressExclusion> existingAddrs;
for (int i = 0; i < workers.size(); i++) {
const ProcessData& workerData = workers[i];

@ -6035,15 +6037,15 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
wait(yield(TaskPriority::DataDistribution));
}

vector<TeamCollectionInterface> tcis;
std::vector<TeamCollectionInterface> tcis;

Reference<AsyncVar<bool>> anyZeroHealthyTeams;
vector<Reference<AsyncVar<bool>>> zeroHealthyTeams;
std::vector<Reference<AsyncVar<bool>>> zeroHealthyTeams;
tcis.push_back(TeamCollectionInterface());
zeroHealthyTeams.push_back(makeReference<AsyncVar<bool>>(true));
int storageTeamSize = configuration.storageTeamSize;

vector<Future<Void>> actors;
std::vector<Future<Void>> actors;
if (configuration.usableRegions > 1) {
tcis.push_back(TeamCollectionInterface());
storageTeamSize = 2 * configuration.storageTeamSize;

@ -6090,7 +6092,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
self->ddId,
&normalDDQueueErrors()));

vector<DDTeamCollection*> teamCollectionsPtrs;
std::vector<DDTeamCollection*> teamCollectionsPtrs;
primaryTeamCollection = makeReference<DDTeamCollection>(
cx,
self->ddId,

@ -6401,16 +6403,16 @@ ACTOR Future<Void> ddSnapCreate(DistributorSnapRequest snapReq,
}

// Find size of set intersection of excludeServerIDs and serverIDs on each team and see if the leftover team is valid
bool _exclusionSafetyCheck(vector<UID>& excludeServerIDs, DDTeamCollection* teamCollection) {
bool _exclusionSafetyCheck(std::vector<UID>& excludeServerIDs, DDTeamCollection* teamCollection) {
std::sort(excludeServerIDs.begin(), excludeServerIDs.end());
for (const auto& team : teamCollection->teams) {
vector<UID> teamServerIDs = team->getServerIDs();
std::vector<UID> teamServerIDs = team->getServerIDs();
std::sort(teamServerIDs.begin(), teamServerIDs.end());
TraceEvent(SevDebug, "DDExclusionSafetyCheck", teamCollection->distributorId)
.detail("Excluding", describe(excludeServerIDs))
.detail("Existing", team->getDesc());
// Find size of set intersection of both vectors and see if the leftover team is valid
vector<UID> intersectSet(teamServerIDs.size());
std::vector<UID> intersectSet(teamServerIDs.size());
auto it = std::set_intersection(excludeServerIDs.begin(),
excludeServerIDs.end(),
teamServerIDs.begin(),
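_exclusionSafetyCheck hinges on std::set_intersection over two sorted ID vectors: if removing the excluded IDs from some team would leave nothing behind, the exclusion is unsafe. A self-contained sketch of that check, with UIDs reduced to ints:

    #include <algorithm>
    #include <vector>

    // Returns false if excluding `excluded` would wipe out any team entirely.
    // Both inputs to std::set_intersection must be sorted.
    bool exclusionIsSafe(std::vector<int> excluded, std::vector<std::vector<int>> teams) {
        std::sort(excluded.begin(), excluded.end());
        for (auto& team : teams) {
            std::sort(team.begin(), team.end());
            std::vector<int> intersect(team.size());
            auto end = std::set_intersection(
                excluded.begin(), excluded.end(), team.begin(), team.end(), intersect.begin());
            size_t overlap = end - intersect.begin();
            if (team.size() - overlap == 0) // every member of this team is excluded
                return false;
        }
        return true;
    }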
@ -6428,7 +6430,7 @@ ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest
Reference<DataDistributorData> self,
Database cx) {
TraceEvent("DDExclusionSafetyCheckBegin", self->ddId).log();
vector<StorageServerInterface> ssis = wait(getStorageServers(cx));
std::vector<StorageServerInterface> ssis = wait(getStorageServers(cx));
DistributorExclusionSafetyCheckReply reply(true);
if (!self->teamCollection) {
TraceEvent("DDExclusionSafetyCheckTeamCollectionInvalid", self->ddId).log();

@ -6443,7 +6445,7 @@ ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest
req.reply.send(reply);
return Void();
}
vector<UID> excludeServerIDs;
std::vector<UID> excludeServerIDs;
// Go through storage server interfaces and translate Address -> server ID (UID)
for (const AddressExclusion& excl : req.exclusions) {
for (const auto& ssi : ssis) {

@ -38,9 +38,9 @@ struct RelocateShard {
};

struct IDataDistributionTeam {
virtual vector<StorageServerInterface> getLastKnownServerInterfaces() const = 0;
virtual std::vector<StorageServerInterface> getLastKnownServerInterfaces() const = 0;
virtual int size() const = 0;
virtual vector<UID> const& getServerIDs() const = 0;
virtual std::vector<UID> const& getServerIDs() const = 0;
virtual void addDataInFlightToTeam(int64_t delta) = 0;
virtual int64_t getDataInFlightToTeam() const = 0;
virtual int64_t getLoadBytes(bool includeInFlight = true, double inflightPenalty = 1.0) const = 0;

@ -57,7 +57,7 @@ struct IDataDistributionTeam {
virtual bool isOptimal() const = 0;
virtual bool isWrongConfiguration() const = 0;
virtual void setWrongConfiguration(bool) = 0;
virtual void addServers(const vector<UID>& servers) = 0;
virtual void addServers(const std::vector<UID>& servers) = 0;
virtual std::string getTeamID() const = 0;

std::string getDesc() const {

@ -133,11 +133,11 @@ public:
ShardsAffectedByTeamFailure() {}

struct Team {
vector<UID> servers; // sorted
std::vector<UID> servers; // sorted
bool primary;

Team() : primary(true) {}
Team(vector<UID> const& servers, bool primary) : servers(servers), primary(primary) {}
Team(std::vector<UID> const& servers, bool primary) : servers(servers), primary(primary) {}

bool operator<(const Team& r) const {
if (servers == r.servers)
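Team keeps its servers vector sorted so equality and operator< are canonical, which is what lets Team act as a key in ordered containers such as the team_shards set below. A sketch of the idea, with UIDs reduced to ints:

    #include <set>
    #include <vector>

    struct Team {
        std::vector<int> servers; // kept sorted so comparisons are canonical
        bool primary = true;

        bool operator<(const Team& r) const {
            if (servers == r.servers)
                return primary < r.primary; // tie-break on the primary flag
            return servers < r.servers;     // lexicographic on sorted IDs
        }
    };

    // Usable directly as an ordered-set key, e.g. for (team, shard) bookkeeping.
    std::set<Team> teams{ { { 1, 2, 3 }, true }, { { 1, 2, 3 }, false } };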
@ -167,12 +167,12 @@ public:
// intersecting shards.

int getNumberOfShards(UID ssID) const;
vector<KeyRange> getShardsFor(Team team);
std::vector<KeyRange> getShardsFor(Team team);
bool hasShards(Team team) const;

// The first element of the pair is either the source for non-moving shards or the destination team for in-flight
// shards. The second element of the pair is all previous sources for in-flight shards
std::pair<vector<Team>, vector<Team>> getTeamsFor(KeyRangeRef keys);
std::pair<std::vector<Team>, std::vector<Team>> getTeamsFor(KeyRangeRef keys);

void defineShard(KeyRangeRef keys);
void moveShard(KeyRangeRef keys, std::vector<Team> destinationTeam);

@ -190,7 +190,7 @@ private:
}
};

KeyRangeMap<std::pair<vector<Team>, vector<Team>>>
KeyRangeMap<std::pair<std::vector<Team>, std::vector<Team>>>
shard_teams; // A shard can be affected by the failure of multiple teams if it is a queued merge, or when
// usable_regions > 1
std::set<std::pair<Team, KeyRange>, OrderByTeamKey> team_shards;
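shard_teams is a KeyRangeMap: every key maps to the value of the range containing it, so getTeamsFor can answer a point query with shard_teams[keys.begin]. A minimal stand-in using std::map and bound lookups — KeyRangeMap itself is richer (coalescing, range iteration), so treat this as a sketch of the core trick only:

    #include <map>
    #include <string>

    // Maps each range [boundary, nextBoundary) to a value; a point query finds
    // the greatest boundary <= key. Assumes begin < end in setRange.
    template <class V>
    struct RangeMap {
        std::map<std::string, V> boundaries; // "" is always the first boundary

        RangeMap() { boundaries[""] = V(); }

        void setRange(const std::string& begin, const std::string& end, V v) {
            V after = (*this)[end]; // value currently covering `end`
            boundaries[begin] = v;
            boundaries[end] = after; // restore the old value past `end`
            boundaries.erase(boundaries.upper_bound(begin), boundaries.lower_bound(end));
        }

        V operator[](const std::string& key) {
            auto it = boundaries.upper_bound(key); // first boundary > key
            --it;                                  // range containing key
            return it->second;
        }
    };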
@ -203,10 +203,10 @@ private:
// DDShardInfo is so named to avoid link-time name collision with ShardInfo within the StorageServer
struct DDShardInfo {
Key key;
vector<UID> primarySrc;
vector<UID> remoteSrc;
vector<UID> primaryDest;
vector<UID> remoteDest;
std::vector<UID> primarySrc;
std::vector<UID> remoteSrc;
std::vector<UID> primaryDest;
std::vector<UID> remoteDest;
bool hasDest;

explicit DDShardInfo(Key key) : key(key), hasDest(false) {}

@ -214,10 +214,10 @@ struct DDShardInfo {

struct InitialDataDistribution : ReferenceCounted<InitialDataDistribution> {
int mode;
vector<std::pair<StorageServerInterface, ProcessClass>> allServers;
std::set<vector<UID>> primaryTeams;
std::set<vector<UID>> remoteTeams;
vector<DDShardInfo> shards;
std::vector<std::pair<StorageServerInterface, ProcessClass>> allServers;
std::set<std::vector<UID>> primaryTeams;
std::set<std::vector<UID>> remoteTeams;
std::vector<DDShardInfo> shards;
Optional<Key> initHealthyZoneValue;
};

@ -259,7 +259,7 @@ ACTOR Future<Void> dataDistributionQueue(Database cx,
FutureStream<RelocateShard> input,
PromiseStream<GetMetricsRequest> getShardMetrics,
Reference<AsyncVar<bool>> processingUnhealthy,
vector<TeamCollectionInterface> teamCollection,
std::vector<TeamCollectionInterface> teamCollection,
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
MoveKeysLock lock,
PromiseStream<Promise<int64_t>> getAverageShardBytes,

@ -288,7 +288,8 @@ ShardSizeBounds getShardSizeBounds(KeyRangeRef shard, int64_t maxShardSize);
int64_t getMaxShardSize(double dbSizeEstimate);

struct DDTeamCollection;
ACTOR Future<vector<std::pair<StorageServerInterface, ProcessClass>>> getServerListAndProcessClasses(Transaction* tr);
ACTOR Future<std::vector<std::pair<StorageServerInterface, ProcessClass>>> getServerListAndProcessClasses(
Transaction* tr);

#include "flow/unactorcompiler.h"
#endif

@ -102,7 +102,7 @@ class ParallelTCInfo final : public ReferenceCounted<ParallelTCInfo>, public IDa
}

template <class T>
std::vector<T> collect(std::function<vector<T>(IDataDistributionTeam const&)> func) const {
std::vector<T> collect(std::function<std::vector<T>(IDataDistributionTeam const&)> func) const {
std::vector<T> result;

for (const auto& team : teams) {

@ -146,7 +146,7 @@ public:
}

std::vector<UID> const& getServerIDs() const override {
static vector<UID> tempServerIDs;
static std::vector<UID> tempServerIDs;
tempServerIDs.clear();
for (const auto& team : teams) {
std::vector<UID> const& childIDs = team->getServerIDs();

@ -393,8 +393,8 @@ ACTOR Future<int64_t> getFirstSize(Reference<AsyncVar<Optional<ShardMetrics>>> s
}

ACTOR Future<Void> changeSizes(DataDistributionTracker* self, KeyRange keys, int64_t oldShardsEndingSize) {
state vector<Future<int64_t>> sizes;
state vector<Future<int64_t>> systemSizes;
state std::vector<Future<int64_t>> sizes;
state std::vector<Future<int64_t>> systemSizes;
for (auto it : self->shards.intersectingRanges(keys)) {
Future<int64_t> thisSize = getFirstSize(it->value().stats);
sizes.push_back(thisSize);

@ -964,8 +964,8 @@ ACTOR Future<Void> dataDistributionTracker(Reference<InitialDataDistribution> in
}
}

vector<KeyRange> ShardsAffectedByTeamFailure::getShardsFor(Team team) {
vector<KeyRange> r;
std::vector<KeyRange> ShardsAffectedByTeamFailure::getShardsFor(Team team) {
std::vector<KeyRange> r;
for (auto it = team_shards.lower_bound(std::pair<Team, KeyRange>(team, KeyRangeRef()));
it != team_shards.end() && it->first == team;
++it)

@ -983,7 +983,7 @@ int ShardsAffectedByTeamFailure::getNumberOfShards(UID ssID) const {
return it == storageServerShards.end() ? 0 : it->second;
}

std::pair<vector<ShardsAffectedByTeamFailure::Team>, vector<ShardsAffectedByTeamFailure::Team>>
std::pair<std::vector<ShardsAffectedByTeamFailure::Team>, std::vector<ShardsAffectedByTeamFailure::Team>>
ShardsAffectedByTeamFailure::getTeamsFor(KeyRangeRef keys) {
return shard_teams[keys.begin];
}

@ -1107,7 +1107,7 @@ void ShardsAffectedByTeamFailure::check() {
}
auto rs = shard_teams.ranges();
for (auto i = rs.begin(); i != rs.end(); ++i)
for (vector<Team>::iterator t = i->value().first.begin(); t != i->value().first.end(); ++t)
for (std::vector<Team>::iterator t = i->value().first.begin(); t != i->value().first.end(); ++t)
if (!team_shards.count(std::make_pair(*t, i->range()))) {
std::string teamDesc, shards;
for (int k = 0; k < t->servers.size(); k++)
@ -30,15 +30,16 @@ struct DataDistributorInterface {
RequestStream<ReplyPromise<Void>> waitFailure;
RequestStream<struct HaltDataDistributorRequest> haltDataDistributor;
struct LocalityData locality;
UID myId;
RequestStream<struct DistributorSnapRequest> distributorSnapReq;
RequestStream<struct DistributorExclusionSafetyCheckRequest> distributorExclCheckReq;
RequestStream<struct GetDataDistributorMetricsRequest> dataDistributorMetrics;

DataDistributorInterface() {}
explicit DataDistributorInterface(const struct LocalityData& l) : locality(l) {}
explicit DataDistributorInterface(const struct LocalityData& l, UID id) : locality(l), myId(id) {}

void initEndpoints() {}
UID id() const { return waitFailure.getEndpoint().token; }
UID id() const { return myId; }
NetworkAddress address() const { return waitFailure.getEndpoint().getPrimaryAddress(); }
bool operator==(const DataDistributorInterface& r) const { return id() == r.id(); }
bool operator!=(const DataDistributorInterface& r) const { return !(*this == r); }

@ -49,6 +50,7 @@ struct DataDistributorInterface {
waitFailure,
haltDataDistributor,
locality,
myId,
distributorSnapReq,
distributorExclCheckReq,
dataDistributorMetrics);

@ -132,11 +134,12 @@ struct DistributorExclusionSafetyCheckReply {

struct DistributorExclusionSafetyCheckRequest {
constexpr static FileIdentifier file_identifier = 5830931;
vector<AddressExclusion> exclusions;
std::vector<AddressExclusion> exclusions;
ReplyPromise<DistributorExclusionSafetyCheckReply> reply;

DistributorExclusionSafetyCheckRequest() {}
explicit DistributorExclusionSafetyCheckRequest(vector<AddressExclusion> exclusions) : exclusions(exclusions) {}
explicit DistributorExclusionSafetyCheckRequest(std::vector<AddressExclusion> exclusions)
: exclusions(exclusions) {}

template <class Ar>
void serialize(Ar& ar) {

@ -330,13 +330,13 @@ public:
}
#endif

Future<Future<Void>> push(StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
Future<Future<Void>> push(StringRef pageData, std::vector<Reference<SyncQueue>>* toSync) {
return push(this, pageData, toSync);
}

ACTOR static Future<Future<Void>> push(RawDiskQueue_TwoFiles* self,
StringRef pageData,
vector<Reference<SyncQueue>>* toSync) {
std::vector<Reference<SyncQueue>>* toSync) {
// Write the given data (pageData) to the queue files, swapping or extending them if necessary.
// Don't do any syncs, but push the modified file(s) onto toSync.
ASSERT(self->readingFile == 2);

@ -345,7 +345,7 @@ public:
ASSERT(self->writingPos % _PAGE_SIZE == 0);
ASSERT(self->files[0].size % _PAGE_SIZE == 0 && self->files[1].size % _PAGE_SIZE == 0);

state vector<Future<Void>> waitfor;
state std::vector<Future<Void>> waitfor;

if (pageData.size() + self->writingPos > self->files[1].size) {
if (self->files[0].popped == self->files[0].size) {

@ -440,7 +440,7 @@ public:
state Promise<Void> errorPromise = self->error;
state std::string filename = self->files[0].dbgFilename;
state UID dbgid = self->dbgid;
state vector<Reference<SyncQueue>> syncFiles;
state std::vector<Reference<SyncQueue>> syncFiles;
state Future<Void> lastCommit = self->lastCommit;
try {
// pushing might need to wait for previous pushes to start (to maintain order) or for

@ -531,7 +531,7 @@ public:
}

ACTOR static Future<Void> openFiles(RawDiskQueue_TwoFiles* self) {
state vector<Future<Reference<IAsyncFile>>> fs;
state std::vector<Future<Reference<IAsyncFile>>> fs;
fs.reserve(2);
for (int i = 0; i < 2; i++)
fs.push_back(IAsyncFileSystem::filesystem()->open(self->filename(i),

@ -574,7 +574,7 @@ public:
// It also permits the recovery code to assume that whatever it reads is durable. Otherwise a prior
// process could have written (but not synchronized) data to the file which we will read but which
// might not survive a reboot. The recovery code assumes otherwise and could corrupt the disk.
vector<Future<Void>> syncs;
std::vector<Future<Void>> syncs;
syncs.reserve(fs.size());
for (int i = 0; i < fs.size(); i++)
syncs.push_back(fs[i].get()->sync());

@ -635,11 +635,11 @@ public:
wait(openFiles(self));

// Get the file sizes
vector<Future<int64_t>> fsize;
std::vector<Future<int64_t>> fsize;
fsize.reserve(2);
for (int i = 0; i < 2; i++)
fsize.push_back(self->files[i].f->size());
vector<int64_t> file_sizes = wait(getAll(fsize));
std::vector<int64_t> file_sizes = wait(getAll(fsize));
for (int i = 0; i < 2; i++) {
// SOMEDAY: If the file size is not a multiple of page size, it may never be shortened. Change this?
self->files[i].size = file_sizes[i] - file_sizes[i] % sizeof(Page);

@ -647,7 +647,7 @@ public:
}

// Read the first pages
vector<Future<int>> reads;
std::vector<Future<int>> reads;
for (int i = 0; i < 2; i++)
if (self->files[i].size > 0)
reads.push_back(self->files[i].f->read(self->firstPages[i], sizeof(Page), 0));

@ -669,7 +669,7 @@ public:

// Truncate both files, since perhaps only the first pages are corrupted. This avoids cases where
// overwriting the first page and then terminating makes subsequent pages valid upon recovery.
vector<Future<Void>> truncates;
std::vector<Future<Void>> truncates;
for (int i = 0; i < 2; ++i)
if (self->files[i].size > 0)
truncates.push_back(self->truncateFile(self, i, 0));

@ -833,7 +833,7 @@ public:
try {
state int file = self->readingFile;
state int64_t pos = (self->readingPage - self->readingBuffer.size() / sizeof(Page) - 1) * sizeof(Page);
state vector<Future<Void>> commits;
state std::vector<Future<Void>> commits;
state bool swap = file == 0;

TEST(file == 0); // truncate before last read page on file 0

@ -785,7 +785,7 @@ ACTOR static Future<Void> transactionStarter(GrvProxyInterface proxy,
int defaultPriTransactionsStarted[2] = { 0, 0 };
int batchPriTransactionsStarted[2] = { 0, 0 };

vector<vector<GetReadVersionRequest>> start(
std::vector<std::vector<GetReadVersionRequest>> start(
2); // start[0] is transactions starting with !(flags&CAUSAL_READ_RISKY), start[1] is transactions starting
// with flags&CAUSAL_READ_RISKY
Optional<UID> debugID;
@ -7,6 +7,7 @@
#include <rocksdb/slice_transform.h>
#include <rocksdb/statistics.h>
#include <rocksdb/table.h>
#include <rocksdb/version.h>
#include <rocksdb/utilities/table_properties_collectors.h>
#include "fdbserver/CoroFlow.h"
#include "flow/flow.h"

@ -24,6 +25,13 @@

#ifdef SSD_ROCKSDB_EXPERIMENTAL

// Enforcing rocksdb version to be 6.22.1 or greater.
static_assert(ROCKSDB_MAJOR >= 6, "Unsupported rocksdb version. Update the rocksdb to 6.22.1 version");
static_assert(ROCKSDB_MAJOR == 6 ? ROCKSDB_MINOR >= 22 : true,
"Unsupported rocksdb version. Update the rocksdb to 6.22.1 version");
static_assert((ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR == 22) ? ROCKSDB_PATCH >= 1 : true,
"Unsupported rocksdb version. Update the rocksdb to 6.22.1 version");

namespace {

rocksdb::Slice toSlice(StringRef s) {
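The three-step static_assert ladder added above encodes "version >= 6.22.1" one component at a time. The same guard can be expressed with a single comparison by packing the components into one integer; a sketch, with stand-in values where the real file gets the macros from <rocksdb/version.h>:

    // Compile-time guard equivalent to the ladder above. The #defines here are
    // illustrative stand-ins; in the real translation unit they come from
    // <rocksdb/version.h> and must not be redefined.
    #define ROCKSDB_MAJOR 6
    #define ROCKSDB_MINOR 22
    #define ROCKSDB_PATCH 1

    #define ROCKSDB_VERSION_NUM(maj, min, pat) ((maj)*10000 + (min)*100 + (pat))

    static_assert(ROCKSDB_VERSION_NUM(ROCKSDB_MAJOR, ROCKSDB_MINOR, ROCKSDB_PATCH) >=
                      ROCKSDB_VERSION_NUM(6, 22, 1),
                  "Unsupported rocksdb version. Update the rocksdb to 6.22.1 version");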
@ -464,12 +472,10 @@ struct RocksDBKeyValueStore : IKeyValueStore {
int accumulatedBytes = 0;
rocksdb::Status s;
auto options = getReadOptions();
// TODO: Deadline option is not supported with current rocksdb verion. Re-enable the code
// below when deadline option is supported.
/* uint64_t deadlineMircos =
uint64_t deadlineMircos =
db->GetEnv()->NowMicros() + (readRangeTimeout - (timer_monotonic() - a.startTime)) * 1000000;
std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000);
options.deadline = std::chrono::duration_cast<std::chrono::microseconds>(deadlineSeconds); */
options.deadline = std::chrono::duration_cast<std::chrono::microseconds>(deadlineSeconds);
// When using a prefix extractor, ensure that keys are returned in order even if they cross
// a prefix boundary.
options.auto_prefix_mode = (SERVER_KNOBS->ROCKSDB_PREFIX_LEN > 0);
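The hunk above re-enables RocksDB's read deadline, which the newly required 6.22.1 supports: rocksdb::ReadOptions::deadline is an absolute std::chrono::microseconds value measured against Env::NowMicros(). A standalone sketch of deriving it from a remaining time budget (assuming RocksDB >= 6.8, where the field exists; the budget value is illustrative):

    #include <chrono>
    #include <rocksdb/db.h>
    #include <rocksdb/options.h>

    // Compute an absolute deadline for a range read from a remaining time budget.
    // remainingSeconds would come from (timeout - elapsed), as in the hunk above.
    rocksdb::ReadOptions makeDeadlineOptions(rocksdb::DB* db, double remainingSeconds) {
        rocksdb::ReadOptions options;
        uint64_t nowMicros = db->GetEnv()->NowMicros();
        uint64_t deadlineMicros = nowMicros + static_cast<uint64_t>(remainingSeconds * 1e6);
        options.deadline = std::chrono::microseconds(deadlineMicros);
        return options;
    }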
@ -899,7 +899,7 @@ struct RawCursor {
}
}
void fastClear(KeyRangeRef keys, bool& freeTableEmpty) {
vector<int> clearBuffer(SERVER_KNOBS->CLEAR_BUFFER_SIZE);
std::vector<int> clearBuffer(SERVER_KNOBS->CLEAR_BUFFER_SIZE);
clearBuffer[0] = 0;

while (true) {

@ -937,7 +937,7 @@ struct RawCursor {
}
}
int lazyDelete(int desiredPages) {
vector<int> clearBuffer(SERVER_KNOBS->CLEAR_BUFFER_SIZE);
std::vector<int> clearBuffer(SERVER_KNOBS->CLEAR_BUFFER_SIZE);
clearBuffer[0] = 0;

IntKeyCursor fc(db, db.freetable, true);

@ -1611,7 +1611,7 @@ private:
volatile int64_t diskBytesUsed;
volatile int64_t freeListPages;

vector<Reference<ReadCursor>> readCursors;
std::vector<Reference<ReadCursor>> readCursors;
Reference<IAsyncFile> dbFile, walFile;

struct Reader : IThreadPoolReceiver {

@ -1718,7 +1718,7 @@ private:
volatile int64_t& diskBytesUsed;
volatile int64_t& freeListPages;
UID dbgid;
vector<Reference<ReadCursor>>& readThreads;
std::vector<Reference<ReadCursor>>& readThreads;
bool checkAllChecksumsOnOpen;
bool checkIntegrityOnOpen;

@ -1731,7 +1731,7 @@ private:
volatile int64_t& diskBytesUsed,
volatile int64_t& freeListPages,
UID dbgid,
vector<Reference<ReadCursor>>* pReadThreads)
std::vector<Reference<ReadCursor>>* pReadThreads)
: kvs(kvs), conn(kvs->filename, isBtreeV2, isBtreeV2), cursor(nullptr), commits(), setsThisCommit(),
freeTableEmpty(false), writesComplete(writesComplete), springCleaningStats(springCleaningStats),
diskBytesUsed(diskBytesUsed), freeListPages(freeListPages), dbgid(dbgid), readThreads(*pReadThreads),

@ -25,13 +25,11 @@
#include "fdbclient/MonitorLeader.h"
#include "flow/actorcompiler.h" // This must be the last #include.

Optional<std::pair<LeaderInfo, bool>> getLeader(const vector<Optional<LeaderInfo>>& nominees);

ACTOR Future<Void> submitCandidacy(Key key,
LeaderElectionRegInterface coord,
LeaderInfo myInfo,
UID prevChangeID,
Reference<AsyncVar<vector<Optional<LeaderInfo>>>> nominees,
Reference<AsyncVar<std::vector<Optional<LeaderInfo>>>> nominees,
int index) {
loop {
auto const& nom = nominees->get()[index];

@ -41,7 +39,7 @@ ACTOR Future<Void> submitCandidacy(Key key,
TaskPriority::CoordinationReply));

if (li != nominees->get()[index]) {
vector<Optional<LeaderInfo>> v = nominees->get();
std::vector<Optional<LeaderInfo>> v = nominees->get();
v[index] = li;
nominees->set(v);
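submitCandidacy updates one slot of the observed nominees vector by copying the current value, mutating the copy, and publishing it back, so watchers observe a single whole-vector change. A sketch of that copy-modify-publish pattern with a minimal observable container (this Observable is a stand-in, not flow's AsyncVar):

    #include <functional>
    #include <vector>

    // Minimal observable value: set() stores a new snapshot and notifies.
    template <class T>
    class Observable {
        T value;
        std::function<void(const T&)> onChange;
    public:
        explicit Observable(T v) : value(std::move(v)) {}
        const T& get() const { return value; }
        void subscribe(std::function<void(const T&)> f) { onChange = std::move(f); }
        void set(T v) {
            value = std::move(v);
            if (onChange)
                onChange(value);
        }
    };

    // Observers never see a half-updated vector (single-threaded, like flow).
    void updateSlot(Observable<std::vector<int>>& nominees, int index, int li) {
        if (nominees.get()[index] != li) {
            std::vector<int> v = nominees.get(); // copy the current snapshot
            v[index] = li;                       // mutate the copy
            nominees.set(std::move(v));          // publish in one step
        }
    }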
@ -91,7 +89,8 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
Reference<AsyncVar<Value>> outSerializedLeader,
bool hasConnected,
Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo) {
state Reference<AsyncVar<vector<Optional<LeaderInfo>>>> nominees(new AsyncVar<vector<Optional<LeaderInfo>>>());
state Reference<AsyncVar<std::vector<Optional<LeaderInfo>>>> nominees(
new AsyncVar<std::vector<Optional<LeaderInfo>>>());
state LeaderInfo myInfo;
state Future<Void> candidacies;
state bool iAmLeader = false;

@ -106,7 +105,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
wait(delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
}

nominees->set(vector<Optional<LeaderInfo>>(coordinators.clientLeaderServers.size()));
nominees->set(std::vector<Optional<LeaderInfo>>(coordinators.clientLeaderServers.size()));

myInfo.serializedInfo = proposedSerializedInterface;
outSerializedLeader->set(Value());

@ -121,7 +120,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
prevChangeID = myInfo.changeID;
myInfo.updateChangeID(asyncPriorityInfo->get());

vector<Future<Void>> cand;
std::vector<Future<Void>> cand;
cand.reserve(coordinators.leaderElectionServers.size());
for (int i = 0; i < coordinators.leaderElectionServers.size(); i++)
cand.push_back(submitCandidacy(

@ -206,8 +205,8 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
.detail("NewChangeID", myInfo.changeID);
}

state vector<Future<Void>> true_heartbeats;
state vector<Future<Void>> false_heartbeats;
state std::vector<Future<Void>> true_heartbeats;
state std::vector<Future<Void>> false_heartbeats;
for (int i = 0; i < coordinators.leaderElectionServers.size(); i++) {
Future<LeaderHeartbeatReply> hb =
retryBrokenPromise(coordinators.leaderElectionServers[i].leaderHeartbeat,

@ -28,13 +28,6 @@

class ServerCoordinators;

template <class LeaderInterface>
Future<Void> tryBecomeLeader(ServerCoordinators const& coordinators,
LeaderInterface const& proposedInterface,
Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader,
bool hasConnected,
Reference<AsyncVar<ClusterControllerPriorityInfo>> const& asyncPriorityInfo);

// Participates in the given coordination group's leader election process, nominating the given
// LeaderInterface (presumed to be a local interface) as leader. The leader election process is
// "sticky" - once a leader becomes leader, as long as its communications with other processes are

@ -43,9 +36,15 @@ Future<Void> tryBecomeLeader(ServerCoordinators const& coordinators,
// set to the proposedInterface, and then if it is displaced by another leader, the return value will
// eventually be set. If the return value is cancelled, the candidacy or leadership of the proposedInterface
// will eventually end.
template <class LeaderInterface>
Future<Void> tryBecomeLeader(ServerCoordinators const& coordinators,
LeaderInterface const& proposedInterface,
Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader,
bool hasConnected,
Reference<AsyncVar<ClusterControllerPriorityInfo>> const& asyncPriorityInfo);

Future<Void> changeLeaderCoordinators(ServerCoordinators const& coordinators, Value const& forwardingInfo);
// Inform all the coordinators that they have been replaced with a new connection string
Future<Void> changeLeaderCoordinators(ServerCoordinators const& coordinators, Value const& forwardingInfo);

#ifndef __INTEL_COMPILER
#pragma region Implementation
@ -501,7 +501,7 @@ Version ILogSystem::ServerPeekCursor::popped() const {
return poppedVersion;
}

ILogSystem::MergedPeekCursor::MergedPeekCursor(vector<Reference<ILogSystem::IPeekCursor>> const& serverCursors,
ILogSystem::MergedPeekCursor::MergedPeekCursor(std::vector<Reference<ILogSystem::IPeekCursor>> const& serverCursors,
Version begin)
: serverCursors(serverCursors), tag(invalidTag), bestServer(-1), currentCursor(0), readQuorum(serverCursors.size()),
messageVersion(begin), hasNextMessage(false), randomID(deterministicRandom()->randomUniqueID()),

@ -540,7 +540,7 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor(
sortedVersions.resize(serverCursors.size());
}

ILogSystem::MergedPeekCursor::MergedPeekCursor(vector<Reference<ILogSystem::IPeekCursor>> const& serverCursors,
ILogSystem::MergedPeekCursor::MergedPeekCursor(std::vector<Reference<ILogSystem::IPeekCursor>> const& serverCursors,
LogMessageVersion const& messageVersion,
int bestServer,
int readQuorum,

@ -555,7 +555,7 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor(vector<Reference<ILogSystem::IPee
}

Reference<ILogSystem::IPeekCursor> ILogSystem::MergedPeekCursor::cloneNoMore() {
vector<Reference<ILogSystem::IPeekCursor>> cursors;
std::vector<Reference<ILogSystem::IPeekCursor>> cursors;
for (auto it : serverCursors) {
cursors.push_back(it->cloneNoMore());
}

@ -706,7 +706,7 @@ ACTOR Future<Void> mergedPeekGetMore(ILogSystem::MergedPeekCursor* self,
wait(self->serverCursors[self->bestServer]->getMore(taskID) ||
self->serverCursors[self->bestServer]->onFailed());
} else {
vector<Future<Void>> q;
std::vector<Future<Void>> q;
for (auto& c : self->serverCursors)
if (!c->hasMessage())
q.push_back(c->getMore(taskID));

@ -826,7 +826,7 @@ ILogSystem::SetPeekCursor::SetPeekCursor(std::vector<Reference<LogSet>> const& l
}

Reference<ILogSystem::IPeekCursor> ILogSystem::SetPeekCursor::cloneNoMore() {
vector<vector<Reference<ILogSystem::IPeekCursor>>> cursors;
std::vector<std::vector<Reference<ILogSystem::IPeekCursor>>> cursors;
cursors.resize(logSets.size());
for (int i = 0; i < logSets.size(); i++) {
for (int j = 0; j < logSets[i]->logServers.size(); j++) {

@ -1047,7 +1047,7 @@ ACTOR Future<Void> setPeekGetMore(ILogSystem::SetPeekCursor* self,
}

//TraceEvent("LPC_GetMore3", self->randomID).detail("Start", startVersion.toString()).detail("Tag", self->tag.toString()).detail("BestSetSize", self->serverCursors[self->bestSet].size());
vector<Future<Void>> q;
std::vector<Future<Void>> q;
for (auto& c : self->serverCursors[self->bestSet]) {
if (!c->hasMessage()) {
q.push_back(c->getMore(taskID));

@ -1060,7 +1060,7 @@ ACTOR Future<Void> setPeekGetMore(ILogSystem::SetPeekCursor* self,
} else {
// FIXME: this will peek way too many cursors when satellites exist, and does not need to peek
// bestSet cursors since we cannot get any more data from them
vector<Future<Void>> q;
std::vector<Future<Void>> q;
//TraceEvent("LPC_GetMore4", self->randomID).detail("Start", startVersion.toString()).detail("Tag", self->tag.toString());
for (auto& cursors : self->serverCursors) {
for (auto& c : cursors) {

@ -313,7 +313,7 @@ ACTOR Future<Void> updateMetricRegistration(Database cx, MetricsConfig* config,
loop {
state Future<Void> registrationChange = collection->metricRegistrationChanged.onTrigger();
state Future<Void> newMetric = collection->metricAdded.onTrigger();
state vector<Standalone<StringRef>> keys;
state std::vector<Standalone<StringRef>> keys;
state bool fieldsChanged = false;
state bool enumsChanged = false;

@ -18,6 +18,8 @@
* limitations under the License.
*/

#include <vector>

#include "flow/Util.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbclient/KeyBackedTypes.h"

@ -25,10 +27,8 @@
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/TSSMappingUtil.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

using std::max;
using std::min;
#include "flow/actorcompiler.h" // This must be the last #include.

bool DDEnabledState::isDDEnabled() const {
return ddEnabled;

@ -166,7 +166,7 @@ Future<Void> removeOldDestinations(Reference<ReadYourWritesTransaction> tr,
KeyRangeRef currentKeys) {
KeyRef beginKey = currentKeys.begin;

vector<Future<Void>> actors;
std::vector<Future<Void>> actors;
for (int i = 0; i < shards.size(); i++) {
if (beginKey < shards[i].begin)
actors.push_back(krmSetRangeCoalescing(

@ -182,17 +182,17 @@ Future<Void> removeOldDestinations(Reference<ReadYourWritesTransaction> tr,
return waitForAll(actors);
}

ACTOR Future<vector<UID>> addReadWriteDestinations(KeyRangeRef shard,
vector<StorageServerInterface> srcInterfs,
vector<StorageServerInterface> destInterfs,
Version version,
int desiredHealthy,
int maxServers) {
ACTOR Future<std::vector<UID>> addReadWriteDestinations(KeyRangeRef shard,
std::vector<StorageServerInterface> srcInterfs,
std::vector<StorageServerInterface> destInterfs,
Version version,
int desiredHealthy,
int maxServers) {
if (srcInterfs.size() >= maxServers) {
return vector<UID>();
return std::vector<UID>();
}

state vector<Future<Optional<UID>>> srcChecks;
state std::vector<Future<Optional<UID>>> srcChecks;
srcChecks.reserve(srcInterfs.size());
for (int s = 0; s < srcInterfs.size(); s++) {
srcChecks.push_back(checkReadWrite(srcInterfs[s].getShardState.getReplyUnlessFailedFor(
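addReadWriteDestinations fires a readiness probe per source and destination interface, then counts how many probes returned a value to decide how many extra destinations are needed. A sketch of that count-the-present step and the cap that follows in the next hunk (std::optional stands in for FDB's Optional):

    #include <algorithm>
    #include <optional>
    #include <string>
    #include <vector>

    // Count probes that returned a server ID; an absent result means the
    // replica was not ready in time and should not count toward desiredHealthy.
    int countHealthy(const std::vector<std::optional<std::string>>& checks) {
        return static_cast<int>(std::count_if(
            checks.begin(), checks.end(), [](const auto& c) { return c.has_value(); }));
    }

    // How many destinations to add: cover the health shortfall without
    // exceeding the server cap, mirroring the totalDesired computation below.
    int destinationsToAdd(int desiredHealthy, int healthySrcs, int maxServers, int srcCount) {
        return std::min(desiredHealthy - healthySrcs, maxServers - srcCount);
    }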
@ -204,7 +204,7 @@ ACTOR Future<vector<UID>> addReadWriteDestinations(KeyRangeRef shard,
0));
}

state vector<Future<Optional<UID>>> destChecks;
state std::vector<Future<Optional<UID>>> destChecks;
destChecks.reserve(destInterfs.size());
for (int s = 0; s < destInterfs.size(); s++) {
destChecks.push_back(checkReadWrite(destInterfs[s].getShardState.getReplyUnlessFailedFor(

@ -225,7 +225,7 @@ ACTOR Future<vector<UID>> addReadWriteDestinations(KeyRangeRef shard,
}
}

vector<UID> result;
std::vector<UID> result;
int totalDesired = std::min<int>(desiredHealthy - healthySrcs, maxServers - srcInterfs.size());
for (int s = 0; s < destInterfs.size() && result.size() < totalDesired; s++) {
if (destChecks[s].get().present()) {

@ -275,17 +275,17 @@ ACTOR Future<std::vector<UID>> pickReadWriteServers(Transaction* tr, std::vector
return result;
}

ACTOR Future<vector<vector<UID>>> additionalSources(RangeResult shards,
Reference<ReadYourWritesTransaction> tr,
int desiredHealthy,
int maxServers) {
ACTOR Future<std::vector<std::vector<UID>>> additionalSources(RangeResult shards,
Reference<ReadYourWritesTransaction> tr,
int desiredHealthy,
int maxServers) {
state RangeResult UIDtoTagMap = wait(tr->getRange(serverTagKeys, CLIENT_KNOBS->TOO_MANY));
ASSERT(!UIDtoTagMap.more && UIDtoTagMap.size() < CLIENT_KNOBS->TOO_MANY);
vector<Future<Optional<Value>>> serverListEntries;
std::vector<Future<Optional<Value>>> serverListEntries;
std::set<UID> fetching;
for (int i = 0; i < shards.size() - 1; ++i) {
vector<UID> src;
vector<UID> dest;
std::vector<UID> src;
std::vector<UID> dest;

decodeKeyServersValue(UIDtoTagMap, shards[i].value, src, dest);

@ -304,7 +304,7 @@ ACTOR Future<vector<vector<UID>>> additionalSources(RangeResult shards,
}
}

vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));
std::vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));

std::map<UID, StorageServerInterface> ssiMap;
for (int s = 0; s < serverListValues.size(); s++) {

@ -312,13 +312,13 @@ ACTOR Future<vector<vector<UID>>> additionalSources(RangeResult shards,
ssiMap[ssi.id()] = ssi;
}

vector<Future<vector<UID>>> allChecks;
std::vector<Future<std::vector<UID>>> allChecks;
for (int i = 0; i < shards.size() - 1; ++i) {
KeyRangeRef rangeIntersectKeys(shards[i].key, shards[i + 1].key);
vector<UID> src;
vector<UID> dest;
vector<StorageServerInterface> srcInterfs;
vector<StorageServerInterface> destInterfs;
std::vector<UID> src;
std::vector<UID> dest;
std::vector<StorageServerInterface> srcInterfs;
std::vector<StorageServerInterface> destInterfs;

decodeKeyServersValue(UIDtoTagMap, shards[i].value, src, dest);

@ -337,11 +337,11 @@ ACTOR Future<vector<vector<UID>>> additionalSources(RangeResult shards,
rangeIntersectKeys, srcInterfs, destInterfs, tr->getReadVersion().get(), desiredHealthy, maxServers));
}

vector<vector<UID>> result = wait(getAll(allChecks));
std::vector<std::vector<UID>> result = wait(getAll(allChecks));
return result;
}

ACTOR Future<Void> logWarningAfter(const char* context, double duration, vector<UID> servers) {
ACTOR Future<Void> logWarningAfter(const char* context, double duration, std::vector<UID> servers) {
state double startTime = now();
loop {
wait(delay(duration));
@ -357,7 +357,7 @@ ACTOR Future<Void> logWarningAfter(const char* context, double duration, vector<
// of servers OR if the source list is sufficiently degraded)
ACTOR static Future<Void> startMoveKeys(Database occ,
KeyRange keys,
vector<UID> servers,
std::vector<UID> servers,
MoveKeysLock lock,
FlowLock* startMoveKeysLock,
UID relocationIntervalId,

@ -412,11 +412,11 @@ ACTOR static Future<Void> startMoveKeys(Database occ,
loadedTssMapping = true;
}

vector<Future<Optional<Value>>> serverListEntries;
std::vector<Future<Optional<Value>>> serverListEntries;
serverListEntries.reserve(servers.size());
for (int s = 0; s < servers.size(); s++)
serverListEntries.push_back(tr->get(serverListKeyFor(servers[s])));
state vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));
state std::vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));

for (int s = 0; s < serverListValues.size(); s++) {
if (!serverListValues[s].present()) {

@ -453,15 +453,15 @@ ACTOR static Future<Void> startMoveKeys(Database occ,
// Check that enough servers for each shard are in the correct state
state RangeResult UIDtoTagMap = wait(tr->getRange(serverTagKeys, CLIENT_KNOBS->TOO_MANY));
ASSERT(!UIDtoTagMap.more && UIDtoTagMap.size() < CLIENT_KNOBS->TOO_MANY);
vector<vector<UID>> addAsSource = wait(additionalSources(
std::vector<std::vector<UID>> addAsSource = wait(additionalSources(
old, tr, servers.size(), SERVER_KNOBS->MAX_ADDED_SOURCES_MULTIPLIER * servers.size()));

// For each intersecting range, update keyServers[range] dest to be servers and clear existing dest
// servers from serverKeys
for (int i = 0; i < old.size() - 1; ++i) {
KeyRangeRef rangeIntersectKeys(old[i].key, old[i + 1].key);
vector<UID> src;
vector<UID> dest;
std::vector<UID> src;
std::vector<UID> dest;
decodeKeyServersValue(UIDtoTagMap, old[i].value, src, dest);

// TraceEvent("StartMoveKeysOldRange", relocationIntervalId)

@ -504,7 +504,7 @@ ACTOR static Future<Void> startMoveKeys(Database occ,
// shards on the boundary of currentRange are actually coalesced with the ranges outside of
// currentRange. For all shards internal to currentRange, we overwrite all consecutive keys whose
// value is or should be serverKeysFalse in a single write
vector<Future<Void>> actors;
std::vector<Future<Void>> actors;
for (oldDest = oldDests.begin(); oldDest != oldDests.end(); ++oldDest)
if (std::find(servers.begin(), servers.end(), *oldDest) == servers.end())
actors.push_back(removeOldDestinations(tr, *oldDest, shardMap[*oldDest], currentKeys));

@ -591,7 +591,7 @@ ACTOR Future<Void> waitForShardReady(StorageServerInterface server,
// best effort to also wait for TSS on data move

ACTOR Future<Void> checkFetchingState(Database cx,
vector<UID> dest,
std::vector<UID> dest,
KeyRange keys,
Promise<Void> dataMovementComplete,
UID relocationIntervalId,

@ -606,13 +606,13 @@ ACTOR Future<Void> checkFetchingState(Database cx,
tr.info.taskID = TaskPriority::MoveKeys;
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

vector<Future<Optional<Value>>> serverListEntries;
std::vector<Future<Optional<Value>>> serverListEntries;
serverListEntries.reserve(dest.size());
for (int s = 0; s < dest.size(); s++)
serverListEntries.push_back(tr.get(serverListKeyFor(dest[s])));
state vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));
vector<Future<Void>> requests;
state vector<Future<Void>> tssRequests;
state std::vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));
std::vector<Future<Void>> requests;
state std::vector<Future<Void>> tssRequests;
for (int s = 0; s < serverListValues.size(); s++) {
if (!serverListValues[s].present()) {
// FIXME: Is this the right behavior? dataMovementComplete will never be sent!

@ -659,7 +659,7 @@ ACTOR Future<Void> checkFetchingState(Database cx,
// Should be cancelled and restarted if keyServers[keys].dest changes (?so this is no longer true?)
ACTOR static Future<Void> finishMoveKeys(Database occ,
KeyRange keys,
vector<UID> destinationTeam,
std::vector<UID> destinationTeam,
MoveKeysLock lock,
FlowLock* finishMoveKeysParallelismLock,
bool hasRemote,

@ -724,11 +724,11 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
// Decode and sanity check the result (dest must be the same for all ranges)
bool alreadyMoved = true;

state vector<UID> dest;
state std::vector<UID> dest;
state std::set<UID> allServers;
state std::set<UID> intendedTeam(destinationTeam.begin(), destinationTeam.end());
state vector<UID> src;
vector<UID> completeSrc;
state std::vector<UID> src;
std::vector<UID> completeSrc;

// Iterate through the beginning of keyServers until we find one that hasn't already been processed
int currentIndex;

@ -790,7 +790,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,

// Process the rest of the key servers
for (; currentIndex < keyServers.size() - 1; currentIndex++) {
vector<UID> src2, dest2;
std::vector<UID> src2, dest2;
decodeKeyServersValue(UIDtoTagMap, keyServers[currentIndex].value, src2, dest2);

std::set<UID> srcSet;

@ -838,10 +838,10 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
// Wait for a durable quorum of servers in destServers to have keys available (readWrite)
// They must also have at least the transaction read version so they can't "forget" the shard
// between now and when this transaction commits.
state vector<Future<Void>> serverReady; // only for count below
state vector<Future<Void>> tssReady; // for waiting in parallel with tss
state vector<StorageServerInterface> tssReadyInterfs;
state vector<UID> newDestinations;
state std::vector<Future<Void>> serverReady; // only for count below
state std::vector<Future<Void>> tssReady; // for waiting in parallel with tss
state std::vector<StorageServerInterface> tssReadyInterfs;
state std::vector<UID> newDestinations;
std::set<UID> completeSrcSet(completeSrc.begin(), completeSrc.end());
for (auto& it : dest) {
if (!hasRemote || !completeSrcSet.count(it)) {

@ -850,12 +850,12 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
}

// for smartQuorum
state vector<StorageServerInterface> storageServerInterfaces;
vector<Future<Optional<Value>>> serverListEntries;
state std::vector<StorageServerInterface> storageServerInterfaces;
std::vector<Future<Optional<Value>>> serverListEntries;
serverListEntries.reserve(newDestinations.size());
for (int s = 0; s < newDestinations.size(); s++)
serverListEntries.push_back(tr.get(serverListKeyFor(newDestinations[s])));
state vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));
state std::vector<Optional<Value>> serverListValues = wait(getAll(serverListEntries));

releaser.release();

@ -1039,7 +1039,7 @@ ACTOR Future<std::pair<Version, Tag>> addStorageServer(Database cx, StorageServe
? tr->get(StringRef(encodeFailedServersKey(AddressExclusion(server.secondaryAddress().get().ip))))
: Future<Optional<Value>>(Optional<Value>());

state vector<Future<Optional<Value>>> localityExclusions;
state std::vector<Future<Optional<Value>>> localityExclusions;
std::map<std::string, std::string> localityData = server.locality.getAllData();
for (const auto& l : localityData) {
localityExclusions.push_back(tr->get(StringRef(encodeExcludedLocalityKey(

@ -1339,6 +1339,7 @@ ACTOR Future<Void> removeKeysFromFailedServer(Database cx,
state int i = 0;
for (; i < keyServers.size() - 1; ++i) {
state KeyValueRef it = keyServers[i];

decodeKeyServersValue(UIDtoTagMap, it.value, src, dest);

// The failed server is not present

@ -1442,8 +1443,8 @@ ACTOR Future<Void> removeKeysFromFailedServer(Database cx,

ACTOR Future<Void> moveKeys(Database cx,
KeyRange keys,
vector<UID> destinationTeam,
vector<UID> healthyDestinations,
std::vector<UID> destinationTeam,
std::vector<UID> healthyDestinations,
MoveKeysLock lock,
Promise<Void> dataMovementComplete,
FlowLock* startMoveKeysParallelismLock,

@ -1488,7 +1489,7 @@ ACTOR Future<Void> moveKeys(Database cx,

// Called by the master server to write the very first transaction to the database
// establishing a set of shard servers and all invariants of the systemKeys.
void seedShardServers(Arena& arena, CommitTransactionRef& tr, vector<StorageServerInterface> servers) {
void seedShardServers(Arena& arena, CommitTransactionRef& tr, std::vector<StorageServerInterface> servers) {
std::map<Optional<Value>, Tag> dcId_locality;
std::map<UID, Tag> server_tag;
int8_t nextLocality = 0;

@ -62,14 +62,14 @@ ACTOR Future<MoveKeysLock> takeMoveKeysLock(Database cx, UID ddId);
// This does not modify the moveKeysLock
Future<Void> checkMoveKeysLockReadOnly(Transaction* tr, MoveKeysLock lock, const DDEnabledState* ddEnabledState);

void seedShardServers(Arena& trArena, CommitTransactionRef& tr, vector<StorageServerInterface> servers);
void seedShardServers(Arena& trArena, CommitTransactionRef& tr, std::vector<StorageServerInterface> servers);
// Called by the master server to write the very first transaction to the database
// establishing a set of shard servers and all invariants of the systemKeys.

ACTOR Future<Void> moveKeys(Database occ,
KeyRange keys,
vector<UID> destinationTeam,
vector<UID> healthyDestinations,
std::vector<UID> destinationTeam,
std::vector<UID> healthyDestinations,
MoveKeysLock lock,
Promise<Void> dataMovementComplete,
FlowLock* startMoveKeysParallelismLock,

@ -32,7 +32,7 @@
TraceEvent debugMutation(const char* context, Version version, MutationRef const& mutation, UID id = UID());

// debugKeyRange and debugTagsAndMessage only log the *first* occurrence of a key in their range/commit.
// TODO: Create a TraceEventGroup that forwards all calls to each element of a vector<TraceEvent>,
// TODO: Create a TraceEventGroup that forwards all calls to each element of a std::vector<TraceEvent>,
// to allow "multiple" TraceEvents to be returned.

#define DEBUG_KEY_RANGE(...) MUTATION_TRACKING_ENABLED&& debugKeyRange(__VA_ARGS__)

@ -68,7 +68,7 @@ struct NetworkTestStreamingReply : ReplyPromiseStreamReply {

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, index);
serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, ReplyPromiseStreamReply::sequence, index);
}
};

@@ -38,10 +38,8 @@
 #include "fdbserver/WaitFailure.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 
-using std::make_pair;
 using std::max;
 using std::min;
-using std::pair;
 
 namespace oldTLog_4_6 {
 
@@ -892,7 +890,7 @@ void commitMessages(Reference<LogData> self,
 		    10;
 	}
 
-	self->version_sizes[version] = make_pair(expectedBytes, expectedBytes);
+	self->version_sizes[version] = std::make_pair(expectedBytes, expectedBytes);
 	self->bytesInput += addedBytes;
 	bytesInput += addedBytes;
 

@@ -42,10 +42,8 @@
 #include "fdbserver/FDBExecHelper.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 
-using std::make_pair;
 using std::max;
 using std::min;
-using std::pair;
 
 namespace oldTLog_6_0 {
 
@@ -880,7 +878,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
 		TraceEvent("EnableTLogPlayAllIgnoredPops").log();
 		// use toBePopped and issue all the pops
 		state std::map<Tag, Version>::iterator it;
-		state vector<Future<Void>> ignoredPops;
+		state std::vector<Future<Void>> ignoredPops;
 		self->ignorePopRequest = false;
 		self->ignorePopUid = "";
 		self->ignorePopDeadline = 0.0;
@@ -1923,7 +1921,7 @@ ACTOR Future<Void> tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData*
 		TraceEvent("EnableTLogPlayAllIgnoredPops2").log();
 		// use toBePopped and issue all the pops
 		std::map<Tag, Version>::iterator it;
-		vector<Future<Void>> ignoredPops;
+		std::vector<Future<Void>> ignoredPops;
 		self->ignorePopRequest = false;
 		self->ignorePopDeadline = 0.0;
 		self->ignorePopUid = "";

@@ -43,10 +43,8 @@
 #include "fdbserver/FDBExecHelper.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 
-using std::make_pair;
 using std::max;
 using std::min;
-using std::pair;
 
 namespace oldTLog_6_2 {
 
@@ -1470,7 +1468,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
 		TraceEvent("EnableTLogPlayAllIgnoredPops").log();
 		// use toBePopped and issue all the pops
 		std::map<Tag, Version>::iterator it;
-		vector<Future<Void>> ignoredPops;
+		std::vector<Future<Void>> ignoredPops;
 		self->ignorePopRequest = false;
 		self->ignorePopUid = "";
 		self->ignorePopDeadline = 0.0;
@@ -2368,7 +2366,7 @@ ACTOR Future<Void> tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData*
 		TraceEvent("EnableTLogPlayAllIgnoredPops2").log();
 		// use toBePopped and issue all the pops
 		std::map<Tag, Version>::iterator it;
-		state vector<Future<Void>> ignoredPops;
+		state std::vector<Future<Void>> ignoredPops;
 		self->ignorePopRequest = false;
 		self->ignorePopDeadline = 0.0;
 		self->ignorePopUid = "";

@@ -161,7 +161,7 @@ struct ProxyCommitData {
 	int64_t commitBatchesMemBytesCount;
 	ProxyStats stats;
 	MasterInterface master;
-	vector<ResolverInterface> resolvers;
+	std::vector<ResolverInterface> resolvers;
 	LogSystemDiskQueueAdapter* logAdapter;
 	Reference<ILogSystem> logSystem;
 	IKeyValueStore* txnStateStore;
@@ -201,7 +201,7 @@ struct ProxyCommitData {
 	Deque<std::pair<Version, Version>> txsPopVersions;
 	Version lastTxsPop;
 	bool popRemoteTxs;
-	vector<Standalone<StringRef>> whitelistedBinPathVec;
+	std::vector<Standalone<StringRef>> whitelistedBinPathVec;
 
 	Optional<LatencyBandConfig> latencyBandConfig;
 	double lastStartCommit;
@@ -209,7 +209,7 @@ struct ProxyCommitData {
 	int updateCommitRequests = 0;
 	NotifiedDouble lastCommitTime;
 
-	vector<double> commitComputePerOperation;
+	std::vector<double> commitComputePerOperation;
 	UIDTransactionTagMap<TransactionCommitCostEstimation> ssTrTagCommitCost;
 	double lastMasterReset;
 	double lastResolverReset;
@@ -217,7 +217,7 @@ struct ProxyCommitData {
 	// The tag related to a storage server rarely change, so we keep a vector of tags for each key range to be slightly
 	// more CPU efficient. When a tag related to a storage server does change, we empty out all of these vectors to
 	// signify they must be repopulated. We do not repopulate them immediately to avoid a slow task.
-	const vector<Tag>& tagsForKey(StringRef key) {
+	const std::vector<Tag>& tagsForKey(StringRef key) {
 		auto& tags = keyInfo[key].tags;
 		if (!tags.size()) {
 			auto& r = keyInfo.rangeContaining(key).value();

@@ -35,10 +35,10 @@
 #include <boost/lexical_cast.hpp>
 #include "flow/actorcompiler.h" // This must be the last #include.
 
-ACTOR Future<vector<WorkerDetails>> getWorkers(Reference<AsyncVar<ServerDBInfo> const> dbInfo, int flags = 0) {
+ACTOR Future<std::vector<WorkerDetails>> getWorkers(Reference<AsyncVar<ServerDBInfo> const> dbInfo, int flags = 0) {
 	loop {
 		choose {
-			when(vector<WorkerDetails> w = wait(brokenPromiseToNever(
+			when(std::vector<WorkerDetails> w = wait(brokenPromiseToNever(
 			         dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest(flags))))) {
 				return w;
 			}
@@ -52,7 +52,7 @@ ACTOR Future<WorkerInterface> getMasterWorker(Database cx, Reference<AsyncVar<Se
 	TraceEvent("GetMasterWorker").detail("Stage", "GettingWorkers");
 
 	loop {
-		state vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
+		state std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
 
 		for (int i = 0; i < workers.size(); i++) {
 			if (workers[i].interf.address() == dbInfo->get().master.address()) {
@@ -79,7 +79,7 @@ ACTOR Future<WorkerInterface> getDataDistributorWorker(Database cx, Reference<As
 	TraceEvent("GetDataDistributorWorker").detail("Stage", "GettingWorkers");
 
 	loop {
-		state vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
+		state std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
 		if (!dbInfo->get().distributor.present())
 			continue;
 
@@ -144,7 +144,8 @@ int64_t getPoppedVersionLag(const TraceEventFields& md) {
 	return persistentDataDurableVersion - queuePoppedVersion;
 }
 
-ACTOR Future<vector<WorkerInterface>> getCoordWorkers(Database cx, Reference<AsyncVar<ServerDBInfo> const> dbInfo) {
+ACTOR Future<std::vector<WorkerInterface>> getCoordWorkers(Database cx,
+                                                           Reference<AsyncVar<ServerDBInfo> const> dbInfo) {
 	state std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
 
 	Optional<Value> coordinators =
@@ -164,7 +165,7 @@ ACTOR Future<vector<WorkerInterface>> getCoordWorkers(Database cx, Reference<Asy
 		coordinatorsAddrSet.insert(addr);
 	}
 
-	vector<WorkerInterface> result;
+	std::vector<WorkerInterface> result;
 	for (const auto& worker : workers) {
 		NetworkAddress primary = worker.interf.address();
 		Optional<NetworkAddress> secondary = worker.interf.tLog.getEndpoint().addresses.secondaryAddress;
@@ -223,7 +224,7 @@ ACTOR Future<std::pair<int64_t, int64_t>> getTLogQueueInfo(Database cx,
 	return std::make_pair(maxQueueSize, maxPoppedVersionLag);
 }
 
-ACTOR Future<vector<StorageServerInterface>> getStorageServers(Database cx, bool use_system_priority = false) {
+ACTOR Future<std::vector<StorageServerInterface>> getStorageServers(Database cx, bool use_system_priority = false) {
 	state Transaction tr(cx);
 	loop {
 		if (use_system_priority) {
@@ -234,7 +235,7 @@ ACTOR Future<vector<StorageServerInterface>> getStorageServers(Database cx, bool
 			RangeResult serverList = wait(tr.getRange(serverListKeys, CLIENT_KNOBS->TOO_MANY));
 			ASSERT(!serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY);
 
-			vector<StorageServerInterface> servers;
+			std::vector<StorageServerInterface> servers;
 			servers.reserve(serverList.size());
 			for (int i = 0; i < serverList.size(); i++)
 				servers.push_back(decodeServerListValue(serverList[i].value));
@@ -245,9 +246,9 @@ ACTOR Future<vector<StorageServerInterface>> getStorageServers(Database cx, bool
 	}
 }
 
-ACTOR Future<vector<WorkerInterface>> getStorageWorkers(Database cx,
-                                                        Reference<AsyncVar<ServerDBInfo> const> dbInfo,
-                                                        bool localOnly) {
+ACTOR Future<std::vector<WorkerInterface>> getStorageWorkers(Database cx,
+                                                             Reference<AsyncVar<ServerDBInfo> const> dbInfo,
+                                                             bool localOnly) {
 	state std::vector<StorageServerInterface> servers = wait(getStorageServers(cx));
 	state std::map<NetworkAddress, WorkerInterface> workersMap;
 	std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
@@ -267,7 +268,7 @@ ACTOR Future<vector<WorkerInterface>> getStorageWorkers(Database cx,
 	}
 	auto masterDcId = dbInfo->get().master.locality.dcId();
 
-	vector<WorkerInterface> result;
+	std::vector<WorkerInterface> result;
 	for (const auto& server : servers) {
 		TraceEvent(SevDebug, "DcIdInfo")
 		    .detail("ServerLocalityID", server.locality.dcId())
@@ -586,17 +587,17 @@ ACTOR Future<Void> repairDeadDatacenter(Database cx,
 			    .detail("RemoteDead", remoteDead)
 			    .detail("PrimaryDead", primaryDead);
 			g_simulator.usableRegions = 1;
-			wait(success(changeConfig(cx,
-			                          (primaryDead ? g_simulator.disablePrimary : g_simulator.disableRemote) +
-			                              " repopulate_anti_quorum=1",
-			                          true)));
+			wait(success(ManagementAPI::changeConfig(
+			    cx.getReference(),
+			    (primaryDead ? g_simulator.disablePrimary : g_simulator.disableRemote) + " repopulate_anti_quorum=1",
+			    true)));
 			while (dbInfo->get().recoveryState < RecoveryState::STORAGE_RECOVERED) {
 				wait(dbInfo->onChange());
 			}
 			TraceEvent(SevWarnAlways, "DisablingFearlessConfiguration")
 			    .detail("Location", context)
 			    .detail("Stage", "Usable_Regions");
-			wait(success(changeConfig(cx, "usable_regions=1", true)));
+			wait(success(ManagementAPI::changeConfig(cx.getReference(), "usable_regions=1", true)));
 		}
 	}
 	return Void();

@@ -37,17 +37,19 @@ Future<int64_t> getDataDistributionQueueSize(Database const& cx,
                                              bool const& reportInFlight);
 Future<bool> getTeamCollectionValid(Database const& cx, WorkerInterface const&);
 Future<bool> getTeamCollectionValid(Database const& cx, Reference<AsyncVar<struct ServerDBInfo> const> const&);
-Future<vector<StorageServerInterface>> getStorageServers(Database const& cx, bool const& use_system_priority = false);
-Future<vector<WorkerDetails>> getWorkers(Reference<AsyncVar<ServerDBInfo> const> const& dbInfo, int const& flags = 0);
+Future<std::vector<StorageServerInterface>> getStorageServers(Database const& cx,
+                                                              bool const& use_system_priority = false);
+Future<std::vector<WorkerDetails>> getWorkers(Reference<AsyncVar<ServerDBInfo> const> const& dbInfo,
+                                              int const& flags = 0);
 Future<WorkerInterface> getMasterWorker(Database const& cx, Reference<AsyncVar<ServerDBInfo> const> const& dbInfo);
 Future<Void> repairDeadDatacenter(Database const& cx,
                                   Reference<AsyncVar<ServerDBInfo> const> const& dbInfo,
                                   std::string const& context);
-Future<vector<WorkerInterface>> getStorageWorkers(Database const& cx,
-                                                  Reference<AsyncVar<ServerDBInfo> const> const& dbInfo,
-                                                  bool const& localOnly);
-Future<vector<WorkerInterface>> getCoordWorkers(Database const& cx,
-                                                Reference<AsyncVar<ServerDBInfo> const> const& dbInfo);
+Future<std::vector<WorkerInterface>> getStorageWorkers(Database const& cx,
+                                                       Reference<AsyncVar<ServerDBInfo> const> const& dbInfo,
+                                                       bool const& localOnly);
+Future<std::vector<WorkerInterface>> getCoordWorkers(Database const& cx,
+                                                     Reference<AsyncVar<ServerDBInfo> const> const& dbInfo);
 
 #include "flow/unactorcompiler.h"
 #endif

@@ -750,7 +750,8 @@ ACTOR Future<Void> monitorServerListChange(
 				    .detail("Latency", now() - self->lastSSListFetchedTimestamp);
 			}
 			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
-			vector<std::pair<StorageServerInterface, ProcessClass>> results = wait(getServerListAndProcessClasses(&tr));
+			std::vector<std::pair<StorageServerInterface, ProcessClass>> results =
+			    wait(getServerListAndProcessClasses(&tr));
 			self->lastSSListFetchedTimestamp = now();
 
 			std::map<UID, StorageServerInterface> newServers;

@@ -174,8 +174,8 @@ ACTOR Future<Void> resolveBatch(Reference<Resolver> self, ResolveTransactionBatc
 
 	ResolveTransactionBatchReply& reply = proxyInfo.outstandingBatches[req.version];
 
-	vector<int> commitList;
-	vector<int> tooOldList;
+	std::vector<int> commitList;
+	std::vector<int> tooOldList;
 
 	// Detect conflicts
 	double expire = now() + SERVER_KNOBS->SAMPLE_EXPIRATION_TIME;

@@ -743,7 +743,7 @@ ACTOR Future<Void> handleSendMutationsRequest(RestoreSendMutationsToAppliersRequ
 
 	if (!isDuplicated) {
 		self->inflightSendingReqs++;
-		vector<Future<Void>> fSendMutations;
+		std::vector<Future<Void>> fSendMutations;
 		batchData->rangeToApplier = req.rangeToApplier;
 		for (auto& [loadParam, kvOps] : batchData->kvOpsPerLP) {
 			if (loadParam.isRangeFile == req.useRangeFile) {

@@ -144,7 +144,7 @@ void handleRecruitRoleRequest(RestoreRecruitRoleRequest req,
 // This is done before we assign restore roles for restore workers.
 ACTOR Future<Void> collectRestoreWorkerInterface(Reference<RestoreWorkerData> self, Database cx, int min_num_workers) {
 	state Transaction tr(cx);
-	state vector<RestoreWorkerInterface> agents; // agents is cmdsInterf
+	state std::vector<RestoreWorkerInterface> agents; // agents is cmdsInterf
 
 	loop {
 		try {